From abfd2ce8d7264ff1bca2e884cdada2b756243a8f Mon Sep 17 00:00:00 2001 From: Zunli Hu Date: Wed, 19 Aug 2020 11:22:01 +0800 Subject: [PATCH] Support v2019-12-12 for blob, file share, file datalake (#37) * add new version * upgrade version * upgrade version * refine Co-authored-by: Ubuntu --- README.rst | 9 + .../storagev2/blob/v2019_12_12/__init__.py | 6 +- .../blob/v2019_12_12/_blob_client.py | 257 ++- .../blob/v2019_12_12/_blob_service_client.py | 2 +- .../blob/v2019_12_12/_container_client.py | 78 +- .../blob/v2019_12_12/_deserialize.py | 67 +- .../storagev2/blob/v2019_12_12/_download.py | 2 +- .../_blob_operations_async.py | 24 +- .../_page_blob_operations_async.py | 20 + .../v2019_12_12/_generated/models/__init__.py | 2 +- .../models/_azure_blob_storage_enums.py | 14 +- .../v2019_12_12/_generated/models/_models.py | 14 +- .../_generated/models/_models_py3.py | 18 +- .../_generated/operations/_blob_operations.py | 24 +- .../operations/_page_blob_operations.py | 20 + .../storagev2/blob/v2019_12_12/_lease.py | 32 +- .../blob/v2019_12_12/_list_blobs_helper.py | 166 ++ .../storagev2/blob/v2019_12_12/_models.py | 233 +-- .../blob/v2019_12_12/_quick_query_helper.py | 14 +- .../storagev2/blob/v2019_12_12/_serialize.py | 12 +- .../v2019_12_12/_shared/authentication.py | 16 +- .../blob/v2019_12_12/_shared/avro/schema.py | 2 +- .../blob/v2019_12_12/_shared/base_client.py | 15 +- .../v2019_12_12/_shared/policies_async.py | 1 + .../blob/v2019_12_12/_shared/uploads.py | 6 +- .../storagev2/blob/v2019_12_12/_version.py | 2 +- .../v2019_12_12/aio/_blob_client_async.py | 194 +- .../aio/_blob_service_client_async.py | 2 +- .../aio/_container_client_async.py | 43 +- .../blob/v2019_12_12/aio/_download_async.py | 1 + .../blob/v2019_12_12/aio/_lease_async.py | 33 +- .../v2019_12_12/aio/_list_blobs_helper.py | 162 ++ .../storagev2/blob/v2019_12_12/aio/_models.py | 159 +- .../filedatalake/v2019_12_12/__init__.py | 79 + .../_data_lake_directory_client.py | 523 ++++++ .../v2019_12_12/_data_lake_file_client.py | 708 +++++++ .../v2019_12_12/_data_lake_lease.py | 245 +++ .../v2019_12_12/_data_lake_service_client.py | 421 +++++ .../filedatalake/v2019_12_12/_deserialize.py | 106 ++ .../filedatalake/v2019_12_12/_download.py | 53 + .../v2019_12_12/_file_system_client.py | 782 ++++++++ .../v2019_12_12/_generated/__init__.py | 18 + .../v2019_12_12/_generated/_configuration.py | 64 + .../_generated/_data_lake_storage_client.py | 67 + .../v2019_12_12/_generated/aio/__init__.py | 13 + .../_generated/aio/_configuration_async.py | 63 + .../aio/_data_lake_storage_client_async.py | 68 + .../aio/operations_async/__init__.py | 20 + .../_file_system_operations_async.py | 462 +++++ .../_path_operations_async.py | 1600 ++++++++++++++++ .../_service_operations_async.py | 128 ++ .../v2019_12_12/_generated/models/__init__.py | 66 + .../models/_data_lake_storage_client_enums.py | 55 + .../v2019_12_12/_generated/models/_models.py | 350 ++++ .../_generated/models/_models_py3.py | 350 ++++ .../_generated/operations/__init__.py | 20 + .../operations/_file_system_operations.py | 462 +++++ .../_generated/operations/_path_operations.py | 1599 ++++++++++++++++ .../operations/_service_operations.py | 128 ++ .../v2019_12_12/_generated/version.py | 13 + .../filedatalake/v2019_12_12/_models.py | 648 +++++++ .../filedatalake/v2019_12_12/_path_client.py | 649 +++++++ .../v2019_12_12/_quick_query_helper.py | 71 + .../filedatalake/v2019_12_12/_serialize.py | 81 + .../v2019_12_12/_shared/__init__.py | 56 + .../v2019_12_12/_shared/authentication.py | 
140 ++ .../v2019_12_12/_shared/base_client.py | 437 +++++ .../v2019_12_12/_shared/base_client_async.py | 179 ++ .../v2019_12_12/_shared/constants.py | 26 + .../v2019_12_12/_shared/encryption.py | 542 ++++++ .../v2019_12_12/_shared/models.py | 468 +++++ .../v2019_12_12/_shared/parser.py | 20 + .../v2019_12_12/_shared/policies.py | 610 ++++++ .../v2019_12_12/_shared/policies_async.py | 220 +++ .../v2019_12_12/_shared/request_handlers.py | 147 ++ .../v2019_12_12/_shared/response_handlers.py | 159 ++ .../_shared/shared_access_signature.py | 209 +++ .../v2019_12_12/_shared/uploads.py | 568 ++++++ .../v2019_12_12/_shared/uploads_async.py | 367 ++++ .../v2019_12_12/_shared_access_signature.py | 349 ++++ .../v2019_12_12/_upload_helper.py | 87 + .../filedatalake/v2019_12_12/_version.py | 7 + .../filedatalake/v2019_12_12/aio/__init__.py | 24 + .../aio/_data_lake_directory_client_async.py | 511 +++++ .../aio/_data_lake_file_client_async.py | 513 +++++ .../v2019_12_12/aio/_data_lake_lease_async.py | 243 +++ .../aio/_data_lake_service_client_async.py | 372 ++++ .../v2019_12_12/aio/_download_async.py | 53 + .../aio/_file_system_client_async.py | 745 ++++++++ .../filedatalake/v2019_12_12/aio/_models.py | 110 ++ .../v2019_12_12/aio/_path_client_async.py | 490 +++++ .../v2019_12_12/aio/_upload_helper.py | 87 + .../fileshare/v2019_12_12/__init__.py | 68 + .../fileshare/v2019_12_12/_deserialize.py | 64 + .../v2019_12_12/_directory_client.py | 706 +++++++ .../fileshare/v2019_12_12/_download.py | 522 ++++++ .../fileshare/v2019_12_12/_file_client.py | 1328 +++++++++++++ .../v2019_12_12/_generated/__init__.py | 18 + .../_generated/_azure_file_storage.py | 71 + .../v2019_12_12/_generated/_configuration.py | 58 + .../v2019_12_12/_generated/aio/__init__.py | 13 + .../aio/_azure_file_storage_async.py | 72 + .../_generated/aio/_configuration_async.py | 59 + .../aio/operations_async/__init__.py | 22 + .../_directory_operations_async.py | 672 +++++++ .../_file_operations_async.py | 1666 +++++++++++++++++ .../_service_operations_async.py | 253 +++ .../_share_operations_async.py | 825 ++++++++ .../v2019_12_12/_generated/models/__init__.py | 108 ++ .../models/_azure_file_storage_enums.py | 135 ++ .../v2019_12_12/_generated/models/_models.py | 896 +++++++++ .../_generated/models/_models_py3.py | 896 +++++++++ .../_generated/operations/__init__.py | 22 + .../operations/_directory_operations.py | 672 +++++++ .../_generated/operations/_file_operations.py | 1665 ++++++++++++++++ .../operations/_service_operations.py | 253 +++ .../operations/_share_operations.py | 825 ++++++++ .../v2019_12_12/_generated/version.py | 13 + .../storagev2/fileshare/v2019_12_12/_lease.py | 170 ++ .../fileshare/v2019_12_12/_models.py | 925 +++++++++ .../fileshare/v2019_12_12/_parser.py | 42 + .../fileshare/v2019_12_12/_serialize.py | 111 ++ .../fileshare/v2019_12_12/_share_client.py | 705 +++++++ .../v2019_12_12/_share_service_client.py | 409 ++++ .../fileshare/v2019_12_12/_shared/__init__.py | 56 + .../v2019_12_12/_shared/authentication.py | 140 ++ .../v2019_12_12/_shared/base_client.py | 437 +++++ .../v2019_12_12/_shared/base_client_async.py | 179 ++ .../v2019_12_12/_shared/constants.py | 26 + .../v2019_12_12/_shared/encryption.py | 542 ++++++ .../fileshare/v2019_12_12/_shared/models.py | 468 +++++ .../fileshare/v2019_12_12/_shared/parser.py | 20 + .../fileshare/v2019_12_12/_shared/policies.py | 610 ++++++ .../v2019_12_12/_shared/policies_async.py | 220 +++ .../v2019_12_12/_shared/request_handlers.py | 147 ++ 
.../v2019_12_12/_shared/response_handlers.py | 159 ++ .../_shared/shared_access_signature.py | 209 +++ .../fileshare/v2019_12_12/_shared/uploads.py | 550 ++++++ .../v2019_12_12/_shared/uploads_async.py | 351 ++++ .../v2019_12_12/_shared_access_signature.py | 491 +++++ .../fileshare/v2019_12_12/_version.py | 7 + .../fileshare/v2019_12_12/aio/__init__.py | 20 + .../aio/_directory_client_async.py | 593 ++++++ .../v2019_12_12/aio/_download_async.py | 467 +++++ .../v2019_12_12/aio/_file_client_async.py | 1165 ++++++++++++ .../fileshare/v2019_12_12/aio/_lease_async.py | 166 ++ .../fileshare/v2019_12_12/aio/_models.py | 178 ++ .../v2019_12_12/aio/_share_client_async.py | 563 ++++++ .../aio/_share_service_client_async.py | 362 ++++ .../queue/v2018_03_28/_shared/base_client.py | 3 +- .../v2018_03_28/_shared/base_client_async.py | 3 +- .../queue/v2018_03_28/_shared/policies.py | 2 +- .../v2018_03_28/_shared/request_handlers.py | 2 +- .../queue/v2018_03_28/_shared/uploads.py | 6 +- .../v2018_03_28/_shared/uploads_async.py | 6 +- .../storagev2/queue/v2018_03_28/_version.py | 2 +- scripts/updatev2_1.sh | 2 +- setup.py | 2 +- 158 files changed, 41249 insertions(+), 470 deletions(-) create mode 100644 azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py create mode 100644 azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py create mode 100644 
azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py create mode 100644 azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py create mode 100644 
azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py create mode 100644 
azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py create mode 100644 azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py diff --git a/README.rst b/README.rst index fbe21e1..c8ee2f4 100644 --- a/README.rst +++ b/README.rst @@ -17,6 +17,15 @@ Handles multi-API versions of Azure Storage Data Plane originally from https://g Change Log ---------- +0.4.1 ++++++ +* Add tags support for blob +* Add new api support for azure-multiapi-storagev2: + - filedatalake + - v2019-12-12 + - fileshare + - v2019-12-12 + 0.4.0 +++++ * Add v2019-12-12 for azure.multiapi.storagev2.blob diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py index a540dce..caa0040 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py @@ -52,11 +52,12 @@ CustomerProvidedEncryptionKey, ContainerEncryptionScope, BlobQueryError, - DelimitedJSON, + DelimitedJsonDialect, DelimitedTextDialect, ObjectReplicationPolicy, ObjectReplicationRule ) +from ._list_blobs_helper import BlobPrefix __version__ = VERSION @@ -195,6 +196,7 @@ def download_blob_from_url( 'CorsRule', 'ContainerProperties', 'BlobProperties', + 'BlobPrefix', 'FilteredBlob', 'LeaseProperties', 'ContentSettings', @@ -215,7 +217,7 @@ def download_blob_from_url( 'PartialBatchErrorException', 'ContainerEncryptionScope', 'BlobQueryError', - 'DelimitedJSON', + 'DelimitedJsonDialect', 'DelimitedTextDialect', 'BlobQueryReader', 'ObjectReplicationPolicy', diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py index 5add1a0..b536605 100644 --- 
a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py @@ -13,8 +13,8 @@ try: from urllib.parse import urlparse, quote, unquote except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore import six from azure.core.tracing.decorator import distributed_trace @@ -46,13 +46,13 @@ serialize_blob_tags, serialize_query_format ) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream +from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags from ._quick_query_helper import BlobQueryReader from ._upload_helpers import ( upload_block_blob, upload_append_blob, upload_page_blob) -from ._models import BlobType, BlobBlock, BlobProperties +from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError from ._download import StorageStreamDownloader from ._lease import BlobLeaseClient, get_access_conditions @@ -180,7 +180,7 @@ def _format_url(self, hostname): @classmethod def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. + """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. :param str blob_url: The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be @@ -209,10 +209,18 @@ def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): if not parsed_url.netloc: raise ValueError("Invalid URL: {}".format(blob_url)) - path_blob = parsed_url.path.lstrip('/').split('/') account_path = "" - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) + if ".core." in parsed_url.netloc: + # .core. is indicating non-customized url. Blob name with directory info can also be parsed. + path_blob = parsed_url.path.lstrip('/').split('/', 1) + elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: + path_blob = parsed_url.path.lstrip('/').split('/', 2) + account_path += path_blob[0] + else: + # for customized url. blob name that has directory info cannot be parsed. + path_blob = parsed_url.path.lstrip('/').split('/') + if len(path_blob) > 2: + account_path = "/" + "/".join(path_blob[:-2]) account_url = "{}://{}{}?{}".format( parsed_url.scheme, parsed_url.netloc.rstrip('/'), @@ -456,6 +464,12 @@ def upload_blob( # pylint: disable=too-many-locals and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on @@ -610,6 +624,12 @@ def download_blob(self, offset=None, length=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. 
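# A minimal usage sketch of the revised BlobClient.from_blob_url parsing shown in
# the hunk above: for standard "*.core.*" endpoints the path is now split only
# once after the container, so a blob name containing '/' is preserved; against a
# customized domain such names still cannot be parsed. The account, container,
# blob name and SAS token below are illustrative placeholders.
from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

url = "https://myaccount.blob.core.windows.net/mycontainer/dir1/dir2/data.csv?<sas-token>"
client = BlobClient.from_blob_url(url)
print(client.container_name)  # mycontainer
print(client.blob_name)       # dir1/dir2/data.csv -- directory segments kept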
+ :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -702,13 +722,13 @@ def query_blob(self, query_expression, **kwargs): :keyword blob_format: Optional. Defines the serialization of the data currently stored in the blob. The default is to treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJSON. - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJSON + a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. + :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect :keyword output_format: Optional. Defines the output serialization for the data stream. By default the data will be returned as it is represented in the blob. By providing an output format, the blob data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJSON. - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJSON + according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. + :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect :keyword lease: Required if the blob has an active lease. Value can be a BlobLeaseClient object or the lease ID as a string. @@ -730,6 +750,12 @@ def query_blob(self, query_expression, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -750,6 +776,7 @@ def query_blob(self, query_expression, **kwargs): :caption: select/project on blob/or blob snapshot data by providing simple query expressions. """ errors = kwargs.pop("on_error", None) + error_cls = kwargs.pop("error_cls", BlobQueryError) encoding = kwargs.pop("encoding", None) options, delimiter = self._quick_query_options(query_expression, **kwargs) try: @@ -763,7 +790,8 @@ def query_blob(self, query_expression, **kwargs): record_delimiter=delimiter, encoding=encoding, headers=headers, - response=raw_response_body) + response=raw_response_body, + error_cls=error_cls) @staticmethod def _generic_delete_blob_options(delete_snapshots=False, **kwargs): @@ -839,6 +867,12 @@ def delete_blob(self, delete_snapshots=False, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. 
versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -918,6 +952,12 @@ def get_blob_properties(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -1017,6 +1057,12 @@ def set_http_headers(self, content_settings=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified) @@ -1085,6 +1131,12 @@ def set_blob_metadata(self, metadata=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -1426,6 +1478,11 @@ def create_snapshot(self, metadata=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + :keyword lease: Required if the blob has an active lease. Value can be a BlobLeaseClient object or the lease ID as a string. @@ -1485,6 +1542,7 @@ def _start_copy_from_url_options(self, source_url, metadata=None, incremental_co options = { 'copy_source': source_url, + 'seal_blob': kwargs.pop('seal_destination_blob', None), 'timeout': timeout, 'modified_access_conditions': dest_mod_conditions, 'blob_tags_string': blob_tags_string, @@ -1627,6 +1685,11 @@ def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, this is only applicable to block blobs on standard storage accounts. :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + :keyword bool requires_sync: Enforces that the service will not return a response until the copy is complete. :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). 
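# Hedged sketch of query_blob with the renamed DelimitedJsonDialect (formerly
# DelimitedJSON) documented in the hunks above, including the on_error callback
# that receives the BlobQueryError objects this patch wires through error_cls.
# The blob URL, SAS token and CSV layout are assumptions for illustration only.
from azure.multiapi.storagev2.blob.v2019_12_12 import (
    BlobClient,
    DelimitedJsonDialect,
    DelimitedTextDialect,
)

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/data/report.csv?<sas-token>")
reader = blob.query_blob(
    "SELECT * from BlobStorage",
    blob_format=DelimitedTextDialect(delimiter=",", has_header=True),
    output_format=DelimitedJsonDialect(delimiter="\n"),
    on_error=lambda error: print("query error:", error.description),
)
print(reader.readall())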
@@ -1733,6 +1796,12 @@ def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A BlobLeaseClient object. @@ -1769,6 +1838,17 @@ def set_standard_blob_tier(self, standard_blob_tier, **kwargs): :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 :keyword int timeout: The timeout parameter is expressed in seconds. :keyword lease: @@ -1778,12 +1858,17 @@ def set_standard_blob_tier(self, standard_blob_tier, **kwargs): :rtype: None """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) if standard_blob_tier is None: raise ValueError("A StandardBlobTier must be specified") + if self.snapshot and kwargs.get('version_id'): + raise ValueError("Snapshot and version_id cannot be set at the same time") try: self._client.blob.set_tier( tier=standard_blob_tier, + snapshot=self.snapshot, timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, lease_access_conditions=access_conditions, **kwargs) except StorageErrorException as error: @@ -2018,18 +2103,25 @@ def get_block_list(self, block_list_type="committed", **kwargs): Required if the blob has an active lease. Value can be a BlobLeaseClient object or the lease ID as a string. :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A tuple of two lists - committed and uncommitted blocks :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) try: blocks = self._client.block_blob.get_block_list( list_type=block_list_type, snapshot=self.snapshot, timeout=kwargs.pop('timeout', None), lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, **kwargs) except StorageErrorException as error: process_storage_error(error) @@ -2157,6 +2249,11 @@ def commit_block_list( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. 
versionadded:: 12.4.0 + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: A standard blob tier value to set the blob to. For this version of the library, this is only applicable to block blobs on standard storage accounts. @@ -2198,6 +2295,12 @@ def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to @@ -2209,6 +2312,7 @@ def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): :rtype: None """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) if premium_page_blob_tier is None: raise ValueError("A PremiumPageBlobTier must be specified") try: @@ -2216,6 +2320,7 @@ def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): tier=premium_page_blob_tier, timeout=kwargs.pop('timeout', None), lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, **kwargs) except StorageErrorException as error: process_storage_error(error) @@ -2223,9 +2328,11 @@ def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): def _set_blob_tags_options(self, tags=None, **kwargs): # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] tags = serialize_blob_tags(tags) + mod_conditions = get_modify_conditions(kwargs) options = { 'tags': tags, + 'modified_access_conditions': mod_conditions, 'cls': return_response_headers} options.update(kwargs) return options @@ -2257,6 +2364,8 @@ def set_blob_tags(self, tags=None, **kwargs): bitflips on the wire if using http instead of https, as https (the default), will already validate. Note that this MD5 hash is not stored with the blob. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified) @@ -2270,10 +2379,12 @@ def set_blob_tags(self, tags=None, **kwargs): def _get_blob_tags_options(self, **kwargs): # type: (**Any) -> Dict[str, str] + mod_conditions = get_modify_conditions(kwargs) options = { 'version_id': kwargs.pop('version_id', None), 'snapshot': self.snapshot, + 'modified_access_conditions': mod_conditions, 'timeout': kwargs.pop('timeout', None), 'cls': return_headers_and_deserialized} return options @@ -2289,6 +2400,8 @@ def get_blob_tags(self, **kwargs): :keyword str version_id: The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to add tags to. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Key value pairs of blob tags. 
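# Round-trip sketch for the tag operations documented above: set_blob_tags
# stores a plain str -> str mapping and get_blob_tags returns it as a dict
# (deserialized via parse_tags). Connection details are placeholders.
from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/data/report.csv?<sas-token>")
blob.set_blob_tags({"project": "contoso", "stage": "raw"})
print(blob.get_blob_tags())  # {'project': 'contoso', 'stage': 'raw'}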
@@ -2297,7 +2410,7 @@ def get_blob_tags(self, **kwargs): options = self._get_blob_tags_options(**kwargs) try: _, tags = self._client.blob.get_tags(**options) - return BlobProperties._parse_tags(tags) # pylint: disable=protected-access + return parse_tags(tags) # pylint: disable=protected-access except StorageErrorException as error: process_storage_error(error) @@ -2384,6 +2497,12 @@ def get_page_ranges( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: @@ -2527,6 +2646,12 @@ def set_sequence_number(self, sequence_number_action, sequence_number=None, **kw and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified). @@ -2595,6 +2720,12 @@ def resize_blob(self, size, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on @@ -2719,6 +2850,12 @@ def upload_page( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -2885,6 +3022,12 @@ def upload_pages_from_url(self, source_url, # type: str and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The destination match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -2996,6 +3139,12 @@ def clear_page(self, offset, length, **kwargs): and act according to the condition specified by the `match_condition` parameter. 
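# The if_tags_match_condition keyword recurs across the operations above; a
# hedged example of the documented predicate syntax guarding a download. If the
# blob's tags do not satisfy the clause, the service rejects the request with a
# precondition failure. Names and the SAS token are placeholders.
from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/data/report.csv?<sas-token>")
data = blob.download_blob(if_tags_match_condition="\"stage\"='raw'").readall()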
:keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -3120,6 +3269,12 @@ def append_block( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword str encoding: Defaults to UTF-8. :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: @@ -3261,6 +3416,12 @@ def append_block_from_url(self, copy_source_url, # type: str and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The destination match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~datetime.datetime source_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -3304,3 +3465,71 @@ def append_block_from_url(self, copy_source_url, # type: str return self._client.append_blob.append_block_from_url(**options) # type: ignore except StorageErrorException as error: process_storage_error(error) + + def _seal_append_blob_options(self, **kwargs): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + appendpos_condition = kwargs.pop('appendpos_condition', None) + append_conditions = None + if appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return self._client.append_blob.seal(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py index c5c8c28..bde8475 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py @@ -39,8 +39,8 @@ from ._shared.models import UserDelegationKey from ._lease import BlobLeaseClient from ._models import ( - BlobProperties, ContainerProperties, + BlobProperties, PublicAccess, BlobAnalyticsLogging, Metrics, diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py index 75d287e..d771d19 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py @@ -40,9 +40,8 @@ from ._models import ( # pylint: disable=unused-import ContainerProperties, BlobProperties, - BlobPropertiesPaged, - BlobType, - BlobPrefix) + BlobType) +from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged from ._lease import BlobLeaseClient, get_access_conditions from ._blob_client import BlobClient @@ -783,6 +782,12 @@ def upload_blob( and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to @@ -893,6 +898,12 @@ def delete_blob( and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. 
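# Hedged sketch combining the two sealing additions in this patch: the new
# seal_append_blob method makes an append blob read-only, and the
# seal_destination_blob keyword on start_copy_from_url seals the destination of
# an append-blob copy. All endpoints and tokens are placeholders.
from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

source = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/logs/app.log?<sas-token>")
source.create_append_blob()
source.append_block(b"final entry\n")
source.seal_append_blob()  # further append_block calls now fail

archive = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/archive/app.log?<sas-token>")
archive.start_copy_from_url(source.url, seal_destination_blob=True)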
+ :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -952,6 +963,12 @@ def download_blob(self, blob, offset=None, length=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -998,6 +1015,9 @@ def _generate_delete_blobs_subrequest_options( if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags # Construct parameters timeout = kwargs.pop('timeout', None) @@ -1027,6 +1047,8 @@ def _generate_delete_blobs_subrequest_options( if if_none_match is not None: header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access "if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access return query_parameters, header_parameters @@ -1039,6 +1061,7 @@ def _generate_delete_blobs_options(self, delete_snapshots = kwargs.pop('delete_snapshots', None) if_modified_since = kwargs.pop('if_modified_since', None) if_unmodified_since = kwargs.pop('if_unmodified_since', None) + if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) kwargs.update({'raise_on_any_failure': raise_on_any_failure, 'sas': self._query_str.replace('?', '&'), 'timeout': '&timeout=' + str(timeout) if timeout else "" @@ -1057,18 +1080,21 @@ def _generate_delete_blobs_options(self, if_modified_since=if_modified_since or blob.get('if_modified_since'), if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), etag=blob.get('etag'), + if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') else None, timeout=blob.get('timeout'), ) - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) except AttributeError: - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options( + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access delete_snapshots=delete_snapshots, if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since + if_unmodified_since=if_unmodified_since, + if_tags_match_condition=if_tags_match_condition ) + query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) + req = HttpRequest( "DELETE", "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), @@ -1113,6 +1139,8 @@ def delete_blobs(self, *blobs, **kwargs): key: 'etag', value type: str match the etag or not: key: 'match_condition', value type: MatchConditions + tags match condition: + 
key: 'if_tags_match_condition', value type: str lease: key: 'lease_id', value type: Union[str, LeaseClient] timeout for subrequest: @@ -1135,6 +1163,12 @@ def delete_blobs(self, *blobs, **kwargs): If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword bool raise_on_any_failure: This is a boolean param which defaults to True. When this is set, an exception is raised even if there is a single operation failure. @@ -1152,12 +1186,15 @@ def delete_blobs(self, *blobs, **kwargs): :dedent: 8 :caption: Deleting multiple blobs. """ + if len(blobs) == 0: + return iter(list()) + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) return self._batch_send(*reqs, **options) def _generate_set_tiers_subrequest_options( - self, tier, rehydrate_priority=None, lease_access_conditions=None, **kwargs + self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs ): """This code is a copy from _generated. @@ -1165,6 +1202,9 @@ def _generate_set_tiers_subrequest_options( """ if not tier: raise ValueError("A blob tier must be specified") + if snapshot and version_id: + raise ValueError("Snapshot and version_id cannot be set at the same time") + if_tags = kwargs.pop('if_tags', None) lease_id = None if lease_access_conditions is not None: @@ -1174,6 +1214,10 @@ def _generate_set_tiers_subrequest_options( timeout = kwargs.pop('timeout', None) # Construct parameters query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access if timeout is not None: query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call @@ -1186,6 +1230,8 @@ def _generate_set_tiers_subrequest_options( "rehydrate_priority", rehydrate_priority, 'str') if lease_id is not None: header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access return query_parameters, header_parameters @@ -1197,6 +1243,7 @@ def _generate_set_tiers_options(self, timeout = kwargs.pop('timeout', None) raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) rehydrate_priority = kwargs.pop('rehydrate_priority', None) + if_tags = kwargs.pop('if_tags_match_condition', None) kwargs.update({'raise_on_any_failure': raise_on_any_failure, 'sas': self._query_str.replace('?', '&'), 'timeout': '&timeout=' + str(timeout) if timeout else "" @@ -1211,13 +1258,16 @@ def _generate_set_tiers_options(self, tier = blob_tier or blob.get('blob_tier') query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( tier=tier, + snapshot=blob.get('snapshot'), + 
version_id=blob.get('version_id'), rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), lease_access_conditions=blob.get('lease_id'), + if_tags=if_tags or blob.get('if_tags_match_condition'), timeout=timeout or blob.get('timeout') ) except AttributeError: query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority) + blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) req = HttpRequest( "PUT", @@ -1270,12 +1320,24 @@ def set_standard_blob_tier_blobs( key: 'rehydrate_priority', value type: RehydratePriority lease: key: 'lease_id', value type: Union[str, LeaseClient] + snapshot: + key: "snapshost", value type: str + version id: + key: "version_id", value type: str + tags match condition: + key: 'if_tags_match_condition', value type: str timeout for subrequest: key: 'timeout', value type: int :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :keyword bool raise_on_any_failure: diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py index 67479d3..a8b48b7 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py @@ -9,8 +9,11 @@ TYPE_CHECKING ) +from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties +from ._shared.models import get_enum_value + from ._shared.response_handlers import deserialize_metadata -from ._models import BlobProperties, ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ +from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule if TYPE_CHECKING: @@ -20,7 +23,7 @@ def deserialize_blob_properties(response, obj, headers): blob_properties = BlobProperties( metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response), + object_replication_source_properties=deserialize_ors_policies(response.headers), **headers ) if 'Content-Range' in headers: @@ -31,20 +34,21 @@ def deserialize_blob_properties(response, obj, headers): return blob_properties -def deserialize_ors_policies(response): +def deserialize_ors_policies(policy_dictionary): + + if policy_dictionary is None: + return None # For source blobs (blobs that have policy ids and rule ids applied to them), # the header will be formatted as "x-ms-or-_: {Complete, Failed}". # The value of this header is the status of the replication. 
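# Batch-helper sketch: delete_blobs and set_standard_blob_tier_blobs now accept
# per-blob dicts carrying the options enumerated above (snapshot, version_id,
# if_tags_match_condition, lease, timeout). The container URL, blob names and
# tag values are placeholders; batch requests need appropriate authorization.
from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerClient

container = ContainerClient.from_container_url(
    "https://myaccount.blob.core.windows.net/data?<sas-token>")
container.set_standard_blob_tier_blobs(
    "Cool",
    {"name": "a.csv", "version_id": "2020-08-19T11:22:01.0000000Z"},
    "b.csv",  # plain blob names still work
)
container.delete_blobs(
    {"name": "old.csv", "if_tags_match_condition": "\"stage\"='raw'"},
    "tmp.csv",
)  # per this patch, calling delete_blobs with no blobs returns an empty iterator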
- or_policy_status_headers = {key: val for key, val in response.headers.items() - if key.startswith('x-ms-or') and key != 'x-ms-or-policy-id'} + or_policy_status_headers = {key: val for key, val in policy_dictionary.items() + if 'or-' in key and key != 'x-ms-or-policy-id'} parsed_result = {} - # all the ors headers have the same prefix, so we note down its length here to avoid recalculating it repeatedly - header_prefix_length = len('x-ms-or-') - for key, val in or_policy_status_headers.items(): - policy_and_rule_ids = key[header_prefix_length:].split('_') + # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule + policy_and_rule_ids = key.split('or-')[1].split('_') policy_id = policy_and_rule_ids[0] rule_id = policy_and_rule_ids[1] @@ -106,3 +110,48 @@ def service_properties_deserialize(generated): 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access } + + +def get_blob_properties_from_generated_code(generated): + blob = BlobProperties() + blob.name = generated.name + blob_type = get_enum_value(generated.properties.blob_type) + blob.blob_type = BlobType(blob_type) if blob_type else None + blob.etag = generated.properties.etag + blob.deleted = generated.deleted + blob.snapshot = generated.snapshot + blob.is_append_blob_sealed = generated.properties.is_sealed + blob.metadata = generated.metadata.additional_properties if generated.metadata else {} + blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None + blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access + blob.last_modified = generated.properties.last_modified + blob.creation_time = generated.properties.creation_time + blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access + blob.size = generated.properties.content_length + blob.page_blob_sequence_number = generated.properties.blob_sequence_number + blob.server_encrypted = generated.properties.server_encrypted + blob.encryption_scope = generated.properties.encryption_scope + blob.deleted_time = generated.properties.deleted_time + blob.remaining_retention_days = generated.properties.remaining_retention_days + blob.blob_tier = generated.properties.access_tier + blob.rehydrate_priority = generated.properties.rehydrate_priority + blob.blob_tier_inferred = generated.properties.access_tier_inferred + blob.archive_status = generated.properties.archive_status + blob.blob_tier_change_time = generated.properties.access_tier_change_time + blob.version_id = generated.version_id + blob.is_current_version = generated.is_current_version + blob.tag_count = generated.properties.tag_count + blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access + blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) + return blob + + +def parse_tags(generated_tags): + # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] + """Deserialize a list of BlobTag objects into a dict. 
+ """ + if generated_tags: + tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} + return tag_dict + return None diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_download.py b/azure/multiapi/storagev2/blob/v2019_12_12/_download.py index 478ed3c..e11023c 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_download.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_download.py @@ -258,7 +258,7 @@ class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attr The properties of the blob being downloaded. If only a range of the data is being downloaded, this will be reflected in the properties. :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, + The size of the total data in the stream. This will be the byte range if specified, otherwise the total size of the blob. """ diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py index 344088b..24aea41 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py @@ -439,6 +439,7 @@ async def get_properties(self, snapshot=None, version_id=None, timeout=None, req 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) @@ -2257,7 +2258,7 @@ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, ti return cls(response, None, response_headers) start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. @@ -2296,9 +2297,6 @@ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non :param blob_tags_string: Optional. Used to set blob tags in various blob operations. :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
- :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2376,8 +2374,6 @@ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') if blob_tags_string is not None: header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') @@ -2505,7 +2501,7 @@ async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, leas return cls(response, None, response_headers) abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium @@ -2545,6 +2541,10 @@ async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, reh operation :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) @@ -2556,6 +2556,9 @@ async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, reh lease_id = None if lease_access_conditions is not None: lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "tier" @@ -2586,6 +2589,8 @@ async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, reh header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') if lease_id is not None: header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -2720,6 +2725,9 @@ async def query(self, query_request=None, snapshot=None, timeout=None, request_i if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "query" @@ -2761,6 +2769,8 @@ async def query(self, query_request=None, snapshot=None, timeout=None, request_i 
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct body if query_request is not None: diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py index af39039..c54a27c 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py @@ -489,6 +489,9 @@ async def clear_pages(self, content_length, timeout=None, range=None, request_id if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "page" page_write = "clear" @@ -539,6 +542,8 @@ async def clear_pages(self, content_length, timeout=None, range=None, request_id header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1100,6 +1105,9 @@ async def resize(self, blob_content_length, timeout=None, request_id=None, lease if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "properties" @@ -1140,6 +1148,8 @@ async def resize(self, blob_content_length, timeout=None, request_id=None, lease header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1218,6 +1228,9 @@ async def update_sequence_number(self, sequence_number_action, timeout=None, blo if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "properties" @@ -1252,6 +1265,8 @@ async def update_sequence_number(self, sequence_number_action, timeout=None, blo header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1323,6 +1338,9 @@ async def 
copy_incremental(self, copy_source, timeout=None, request_id=None, mod if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "incrementalcopy" @@ -1353,6 +1371,8 @@ async def copy_incremental(self, copy_source, timeout=None, request_id=None, mod header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py index a77c557..6709caf 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py @@ -202,13 +202,13 @@ 'AccessTier', 'ArchiveStatus', 'BlobType', + 'RehydratePriority', 'StorageErrorCode', 'GeoReplicationStatusType', 'QueryFormatType', 'AccessTierRequired', 'AccessTierOptional', 'PremiumPageBlobAccessTier', - 'RehydratePriority', 'BlobExpiryOptions', 'BlockListType', 'DeleteSnapshotsOptionType', diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py index 853f65b..d89e858 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py @@ -78,6 +78,12 @@ class BlobType(str, Enum): append_blob = "AppendBlob" +class RehydratePriority(str, Enum): + + high = "High" + standard = "Standard" + + class StorageErrorCode(str, Enum): account_already_exists = "AccountAlreadyExists" @@ -257,12 +263,6 @@ class PremiumPageBlobAccessTier(str, Enum): p80 = "P80" -class RehydratePriority(str, Enum): - - high = "High" - standard = "Standard" - - class BlobExpiryOptions(str, Enum): never_expire = "NeverExpire" @@ -333,6 +333,8 @@ class AccountKind(str, Enum): storage = "Storage" blob_storage = "BlobStorage" storage_v2 = "StorageV2" + file_storage = "FileStorage" + block_blob_storage = "BlockBlobStorage" class SyncCopyStatusType(str, Enum): diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py index 1922133..acb79c0 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py @@ -216,7 +216,7 @@ class BlobItemInternal(Model): 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'ObjectReplicationMetadata', 'type': '{str}', 'xml': {'name': 'ObjectReplicationMetadata'}}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, } _xml_map = { 'name': 
'Blob' @@ -364,6 +364,9 @@ class BlobPropertiesInternal(Model): :type expires_on: datetime :param is_sealed: :type is_sealed: bool + :param rehydrate_priority: Possible values include: 'High', 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority """ _validation = { @@ -406,7 +409,8 @@ class BlobPropertiesInternal(Model): 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'IsSealed', 'type': 'bool', 'xml': {'name': 'IsSealed'}}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, } _xml_map = { 'name': 'Properties' @@ -449,6 +453,7 @@ def __init__(self, **kwargs): self.tag_count = kwargs.get('tag_count', None) self.expires_on = kwargs.get('expires_on', None) self.is_sealed = kwargs.get('is_sealed', None) + self.rehydrate_priority = kwargs.get('rehydrate_priority', None) class BlobTag(Model): @@ -1755,6 +1760,9 @@ class StaticWebsite(Model): :type index_document: str :param error_document404_path: The absolute path of the custom 404 page :type error_document404_path: str + :param default_index_document_path: Absolute path of the default index + page + :type default_index_document_path: str """ _validation = { @@ -1765,6 +1773,7 @@ class StaticWebsite(Model): 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, } _xml_map = { } @@ -1774,6 +1783,7 @@ def __init__(self, **kwargs): self.enabled = kwargs.get('enabled', None) self.index_document = kwargs.get('index_document', None) self.error_document404_path = kwargs.get('error_document404_path', None) + self.default_index_document_path = kwargs.get('default_index_document_path', None) class StorageError(Model): diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py index bd6fc85..36c3964 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py @@ -216,7 +216,7 @@ class BlobItemInternal(Model): 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'ObjectReplicationMetadata', 'type': '{str}', 'xml': {'name': 'ObjectReplicationMetadata'}}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, } _xml_map = { 'name': 'Blob' @@ -364,6 +364,9 @@ class BlobPropertiesInternal(Model): :type expires_on: datetime :param is_sealed: :type is_sealed: bool + :param rehydrate_priority: Possible values include: 'High', 'Standard' + :type rehydrate_priority: str or 
+ ~azure.storage.blob.models.RehydratePriority """ _validation = { @@ -406,13 +409,14 @@ class BlobPropertiesInternal(Model): 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'IsSealed', 'type': 'bool', 'xml': {'name': 'IsSealed'}}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, } _xml_map = { 'name': 'Properties' } - def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, **kwargs) -> None: + def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, rehydrate_priority=None, **kwargs) -> None: super(BlobPropertiesInternal, self).__init__(**kwargs) self.creation_time = creation_time self.last_modified = last_modified @@ -449,6 +453,7 @@ def __init__(self, *, last_modified, etag: str, creation_time=None, content_leng self.tag_count = tag_count self.expires_on = expires_on self.is_sealed = is_sealed + self.rehydrate_priority = rehydrate_priority class BlobTag(Model): @@ -1755,6 +1760,9 @@ class StaticWebsite(Model): :type index_document: str :param error_document404_path: The absolute path of the custom 404 page :type error_document404_path: str + :param default_index_document_path: Absolute path of the default index + page + :type default_index_document_path: str """ _validation = { @@ -1765,15 +1773,17 @@ class StaticWebsite(Model): 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 
'xml': {'name': 'ErrorDocument404Path'}}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, } _xml_map = { } - def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, **kwargs) -> None: + def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, default_index_document_path: str=None, **kwargs) -> None: super(StaticWebsite, self).__init__(**kwargs) self.enabled = enabled self.index_document = index_document self.error_document404_path = error_document404_path + self.default_index_document_path = default_index_document_path class StorageError(Model): diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py index e956bcc..9478016 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py @@ -438,6 +438,7 @@ def get_properties(self, snapshot=None, version_id=None, timeout=None, request_i 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) @@ -2256,7 +2257,7 @@ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non return cls(response, None, response_headers) start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. @@ -2295,9 +2296,6 @@ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, req :param blob_tags_string: Optional. Used to set blob tags in various blob operations. :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
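An aside on the StaticWebsite additions threaded through the generated models above: the new default_index_document_path field reaches the service through set_service_properties. A hedged sketch, assuming the package re-exports BlobServiceClient and StaticWebsite the way azure.storage.blob does; the connection string and paths are placeholders, and the REST API documents index_document and default_index_document_path as mutually exclusive.

    from azure.multiapi.storagev2.blob.v2019_12_12 import (
        BlobServiceClient,
        StaticWebsite,
    )

    service = BlobServiceClient.from_connection_string("<connection-string>")
    service.set_service_properties(static_website=StaticWebsite(
        enabled=True,
        # New field: per the model docstring, the absolute path of the
        # default index page, used instead of a per-directory index_document.
        default_index_document_path='index.html',
        error_document404_path='error/404.html',
    ))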
- :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2375,8 +2373,6 @@ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, req header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') if blob_tags_string is not None: header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') @@ -2504,7 +2500,7 @@ def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_acce return cls(response, None, response_headers) abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium @@ -2544,6 +2540,10 @@ def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate operation :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) @@ -2555,6 +2555,9 @@ def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate lease_id = None if lease_access_conditions is not None: lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "tier" @@ -2585,6 +2588,8 @@ def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') if lease_id is not None: header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -2719,6 +2724,9 @@ def query(self, query_request=None, snapshot=None, timeout=None, request_id=None if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "query" @@ -2760,6 +2768,8 @@ def query(self, query_request=None, snapshot=None, timeout=None, request_id=None 
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct body if query_request is not None: diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py index a65ab4c..fedc96c 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py @@ -489,6 +489,9 @@ def clear_pages(self, content_length, timeout=None, range=None, request_id=None, if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "page" page_write = "clear" @@ -539,6 +542,8 @@ def clear_pages(self, content_length, timeout=None, range=None, request_id=None, header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1100,6 +1105,9 @@ def resize(self, blob_content_length, timeout=None, request_id=None, lease_acces if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "properties" @@ -1140,6 +1148,8 @@ def resize(self, blob_content_length, timeout=None, request_id=None, lease_acces header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1218,6 +1228,9 @@ def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequ if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "properties" @@ -1252,6 +1265,8 @@ def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) @@ -1323,6 +1338,9 @@ def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_ 
if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags comp = "incrementalcopy" @@ -1353,6 +1371,8 @@ def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py b/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py index 92dd0a3..7d38423 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py @@ -106,6 +112,12 @@ def acquire(self, lease_duration=-1, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -123,7 +129,7 @@ def acquire(self, lease_duration=-1, **kwargs): process_storage_error(error) self.id = response.get('lease_id') # type: str self.last_modified = response.get('last_modified') # type: datetime - self.etag = kwargs.get('etag') # type: str + self.etag = response.get('etag') # type: str @distributed_trace def renew(self, **kwargs): @@ -153,6 +159,12 @@ def renew(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: None @@ -197,6 +209,12 @@ def release(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: None @@ -240,6 +258,12 @@ def change(self, proposed_lease_id, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds.
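Every lease operation above gains the same keyword. Under the hood, the hand-written layer folds if_tags_match_condition into ModifiedAccessConditions.if_tags (see the _serialize.py hunk near the end of this section), and the generated operations emit it as the x-ms-if-tags request header, the pattern repeated through the earlier hunks. A usage sketch, assuming an already-constructed BlobClient named blob_client; the tag expression and lease duration are illustrative.

    # Editor's sketch: lease operations gated on a blob-tag condition.
    lease = blob_client.acquire_lease(
        lease_duration=15,
        # The service fails the request with a precondition error when the
        # blob's tags do not satisfy the clause.
        if_tags_match_condition="\"project\"='argon'",
    )
    try:
        # Writes must carry the lease while it is held.
        blob_client.set_standard_blob_tier("Cool", lease=lease)
    finally:
        # The same condition can guard the release.
        lease.release(if_tags_match_condition="\"project\"='argon'")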
:return: None @@ -293,6 +317,12 @@ def break_lease(self, lease_break_period=None, **kwargs): If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: Approximate time remaining in the lease period, in seconds. diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py new file mode 100644 index 0000000..f1dd70f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py @@ -0,0 +1,166 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.paging import PageIterator, ItemPaged +from ._deserialize import get_blob_properties_from_generated_code +from ._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix +from ._models import BlobProperties +from ._shared.models import DictMixin +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'.
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + def _extract_data_cb(self, get_next_return): + continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class BlobPrefix(ItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str next_marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. 
+ :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/_models.py index 855d37c..a97445d 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_models.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_models.py @@ -8,7 +8,7 @@ from enum import Enum -from azure.core.paging import PageIterator, ItemPaged +from azure.core.paging import PageIterator from ._generated.models import FilterBlobItem from ._shared import decode_base64_to_text @@ -21,8 +21,6 @@ from ._generated.models import CorsRule as GeneratedCorsRule from ._generated.models import AccessPolicy as GenAccessPolicy from ._generated.models import StorageErrorException -from ._generated.models import BlobPrefix as GenBlobPrefix -from ._generated.models import BlobItemInternal class BlobType(str, Enum): @@ -223,6 +221,8 @@ class StaticWebsite(GeneratedStaticWebsite): The default name of the index page under each directory. :keyword str error_document404_path: The absolute path of the custom 404 page. + :keyword str default_index_document_path: + Absolute path of the default index page. """ def __init__(self, **kwargs): @@ -230,9 +230,11 @@ def __init__(self, **kwargs): if self.enabled: self.index_document = kwargs.get('index_document') self.error_document404_path = kwargs.get('error_document404_path') + self.default_index_document_path = kwargs.get('default_index_document_path') else: self.index_document = None self.error_document404_path = None + self.default_index_document_path = None @classmethod def _from_generated(cls, generated): @@ -242,6 +244,7 @@ def _from_generated(cls, generated): enabled=generated.enabled, index_document=generated.index_document, error_document404_path=generated.error_document404_path, + default_index_document_path=generated.default_index_document_path ) @@ -440,6 +443,11 @@ class BlobProperties(DictMixin): requested a subset of the blob. :ivar int append_blob_committed_block_count: (For Append Blobs) Number of committed blocks in the blob. + :ivar bool is_append_blob_sealed: + Indicate if the append blob is sealed or not. + + .. versionadded:: 12.4.0 + :ivar int page_blob_sequence_number: (For Page Blobs) Sequence number for page blob used for coordinating concurrent writes. @@ -458,6 +466,8 @@ class BlobProperties(DictMixin): for at least a month. 
The archive tier is optimized for storing data that is rarely accessed and stored for at least six months with flexible latency requirements. + :ivar str rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob :ivar ~datetime.datetime blob_tier_change_time: Indicates when the access tier was last changed. :ivar bool blob_tier_inferred: @@ -484,12 +494,24 @@ class BlobProperties(DictMixin): Whether this blob is encrypted. :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: Only present for blobs that have policy ids and rule ids applied to them. + + .. versionadded:: 12.4.0 + :ivar str object_replication_destination_policy: Represents the Object Replication Policy Id that created this blob. + + .. versionadded:: 12.4.0 + :ivar int tag_count: Tags count on this blob. + + .. versionadded:: 12.4.0 + :ivar dict(str, str) tags: Key value pair of tags on this blob. + + .. versionadded:: 12.4.0 + """ def __init__(self, **kwargs): @@ -506,12 +528,14 @@ def __init__(self, **kwargs): self.size = kwargs.get('Content-Length') self.content_range = kwargs.get('Content-Range') self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') + self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') self.server_encrypted = kwargs.get('x-ms-server-encrypted') self.copy = CopyProperties(**kwargs) self.content_settings = ContentSettings(**kwargs) self.lease = LeaseProperties(**kwargs) self.blob_tier = kwargs.get('x-ms-access-tier') + self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') self.deleted = False @@ -527,133 +551,6 @@ def __init__(self, **kwargs): self.tag_count = kwargs.get('x-ms-tag-count') self.tags = None - @classmethod - def _from_generated(cls, generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = 
generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = blob._parse_tags(generated.blob_tags) # pylint: disable=protected-access - return blob - - @staticmethod - def _parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. - """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = BlobProperties._from_generated(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - class FilteredBlob(DictMixin): """Blob info from a Filter Blobs API call. @@ -736,74 +633,6 @@ def _build_item(item): return item -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - class LeaseProperties(DictMixin): """Blob Lease Properties. @@ -1230,7 +1059,7 @@ def _from_generated(cls, generated): return None -class DelimitedJSON(object): +class DelimitedJsonDialect(object): """Defines the input or output JSON serialization for a blob data query. :keyword str delimiter: The line separator character, default value is '\n' @@ -1293,7 +1122,7 @@ def __init__(self, **kwargs): self.status = kwargs.pop('status', None) -class BlobQueryError(Exception): +class BlobQueryError(object): """The error happened during quick query operation. :ivar str error: @@ -1312,7 +1141,3 @@ def __init__(self, error=None, is_fatal=False, description=None, position=None): self.is_fatal = is_fatal self.description = description self.position = position - message = self.error - if self.description: - message += ": {}".format(self.description) - super(BlobQueryError, self).__init__(message) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py index ab69dd7..eb51d98 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py @@ -10,8 +10,6 @@ from ._shared.avro.datafile import DataFileReader from ._shared.avro.avro_io import DatumReader -from ._models import BlobQueryError - class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes """A streaming object to read query results. 
@@ -35,7 +33,8 @@ def __init__( record_delimiter='\n', encoding=None, headers=None, - response=None + response=None, + error_cls=None, ): self.name = name self.container = container @@ -47,6 +46,7 @@ def __init__( self._encoding = encoding self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) self._first_result = self._process_record(next(self._parsed_results)) + self._error_cls = error_cls def __len__(self): return self._size @@ -57,7 +57,7 @@ def _process_record(self, result): if 'data' in result: return result.get('data') if 'fatal' in result: - error = BlobQueryError( + error = self._error_cls( error=result['name'], is_fatal=result['fatal'], description=result['description'], @@ -148,11 +148,13 @@ def __iter__(self): def seekable(): return True - def next(self): + def __next__(self): next_part = next(self.iterator) self._download_offset += len(next_part) return next_part + next = __next__ # Python 2 compatibility. + def tell(self): return self._point @@ -170,7 +172,7 @@ def read(self, size): try: # keep reading from the generator until the buffer of this stream has enough data to read while self._point + size > self._download_offset: - self._buf += self.next() + self._buf += self.__next__() except StopIteration: self.file_length = self._download_offset diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py index 66ee1f3..6781096 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py @@ -13,7 +13,7 @@ from ._models import ( ContainerEncryptionScope, - DelimitedJSON + DelimitedJsonDialect ) from ._generated.models import ( ModifiedAccessConditions, @@ -70,7 +70,8 @@ def get_modify_conditions(kwargs): if_modified_since=kwargs.pop('if_modified_since', None), if_unmodified_since=kwargs.pop('if_unmodified_since', None), if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) + if_none_match=if_none_match or kwargs.pop('if_none_match', None), + if_tags=kwargs.pop('if_tags_match_condition', None) ) @@ -81,7 +82,8 @@ def get_source_conditions(kwargs): source_if_modified_since=kwargs.pop('source_if_modified_since', None), source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), + source_if_tags=kwargs.pop('source_if_tags_match_condition', None) ) @@ -147,7 +149,7 @@ def serialize_blob_tags(tags=None): def serialize_query_format(formater): - if isinstance(formater, DelimitedJSON): + if isinstance(formater, DelimitedJsonDialect): serialization_settings = JsonTextConfiguration( record_separator=formater.delimiter ) @@ -173,5 +175,5 @@ def serialize_query_format(formater): elif not formater: return None else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJSON.") + raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py index a8db96d..b11dc57 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py +++ 
b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py @@ -64,27 +64,30 @@ def __init__(self, account_name, account_key): self.account_key = account_key super(SharedKeyCredentialPolicy, self).__init__() - def _get_headers(self, request, headers_to_sign): + @staticmethod + def _get_headers(request, headers_to_sign): headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) if 'content-length' in headers and headers['content-length'] == '0': del headers['content-length'] return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - def _get_verb(self, request): + @staticmethod + def _get_verb(request): return request.http_request.method + '\n' def _get_canonicalized_resource(self, request): uri_path = urlparse(request.http_request.url).path try: if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): uri_path = URL(uri_path) return '/' + self.account_name + str(uri_path) except TypeError: pass return '/' + self.account_name + uri_path - def _get_canonicalized_headers(self, request): + @staticmethod + def _get_canonicalized_headers(request): string_to_sign = '' x_ms_headers = [] for name, value in request.http_request.headers.items(): @@ -96,8 +99,9 @@ def _get_canonicalized_headers(self, request): string_to_sign += ''.join([name, ':', value, '\n']) return string_to_sign - def _get_canonicalized_resource_query(self, request): - sorted_queries = [(name, value) for name, value in request.http_request.query.items()] + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) sorted_queries.sort() string_to_sign = '' diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py index 6832ab4..ffe2853 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py @@ -393,7 +393,7 @@ class NamedSchema(Schema): def __init__( self, data_type, - name, + name=None, namespace=None, names=None, other_props=None, diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py index 79bab02..361931a 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py @@ -84,12 +84,17 @@ def __init__( raise ValueError("Invalid service: {}".format(service)) service_name = service.split('-')[0] account = parsed_url.netloc.split(".{}.core.".format(service_name)) + self.account_name = account[0] if len(account) > 1 else None - secondary_hostname = None + if not self.account_name and (parsed_url.netloc.startswith("localhost") + or parsed_url.netloc.startswith("127.0.0.1")): + self.account_name = parsed_url.path.strip("/") - self.credential = format_shared_key_credential(account, credential) + self.credential = _format_shared_key_credential(self.account_name, credential) if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None if hasattr(self.credential, "account_name"): self.account_name = self.credential.account_name secondary_hostname = "{}-secondary.{}.{}".format( @@ -326,11 +331,11 @@ def __exit__(self, *args): # pylint: disable=arguments-differ pass -def format_shared_key_credential(account, credential): +def _format_shared_key_credential(account_name, credential): if isinstance(credential, six.string_types): - if len(account) < 2: + if not account_name: raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account[0], "account_key": credential} + credential = {"account_name": account_name, "account_key": credential} if isinstance(credential, dict): if "account_name" not in credential: raise ValueError("Shared key credential missing 'account_name'") diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py index c0a4476..e0926b8 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method import asyncio import random diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py index 623fa16..abf3fb2 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py @@ -520,9 +520,11 @@ def __iter__(self): def seekable(self): return False - def next(self): + def __next__(self): return next(self.iterator) + next = __next__ # Python 2 compatibility. + def tell(self, *args, **kwargs): raise UnsupportedOperation("Data generator does not support tell.") @@ -534,7 +536,7 @@ def read(self, size): count = len(self.leftover) try: while count < size: - chunk = self.next() + chunk = self.__next__() if isinstance(chunk, six.text_type): chunk = chunk.encode(self.encoding) data += chunk diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_version.py b/azure/multiapi/storagev2/blob/v2019_12_12/_version.py index 85a0126..c40634c 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_version.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/_version.py @@ -4,4 +4,4 @@ # license information. # -------------------------------------------------------------------------- -VERSION = "12.4.0b1" +VERSION = "12.4.0" diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py index 82a5c62..3275d02 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information.
# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines +# pylint: disable=too-many-lines, invalid-overridden-method from typing import ( # pylint: disable=unused-import Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, @@ -15,7 +15,7 @@ from .._shared.base_client_async import AsyncStorageAccountHostsMixin from .._shared.policies_async import ExponentialRetry from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result +from .._deserialize import get_page_ranges_result, parse_tags from .._serialize import get_modify_conditions, get_api_version from .._generated import VERSION from .._generated.aio import AzureBlobStorage @@ -205,6 +205,12 @@ async def upload_blob( and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on @@ -315,6 +321,12 @@ async def download_blob(self, offset=None, length=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -397,6 +409,12 @@ async def delete_blob(self, delete_snapshots=False, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -476,6 +494,12 @@ async def get_blob_properties(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -551,6 +575,12 @@ async def set_http_headers(self, content_settings=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified) @@ -593,6 +623,12 @@ async def set_blob_metadata(self, metadata=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -806,6 +842,12 @@ async def create_snapshot(self, metadata=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword lease: Required if the blob has an active lease. Value can be a BlobLeaseClient object or the lease ID as a string. @@ -950,6 +992,12 @@ async def start_copy_from_url(self, source_url, metadata=None, incremental_copy= and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword destination_lease: The lease ID specified for this header must match the lease ID of the destination blob. If the request does not include the lease ID or it is not @@ -970,6 +1018,11 @@ this is only applicable to block blobs on standard storage accounts. :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation applies only to append blobs. + + .. versionadded:: 12.4.0 + :keyword bool requires_sync: Enforces that the service will not return a response until the copy is complete. :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). @@ -1059,6 +1112,12 @@ async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A BlobLeaseClient object.
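All of the docstring hunks above advertise the same new keyword: `if_tags_match_condition` threads a tag predicate into `get_modify_conditions` so the service performs the operation only when the blob's tags satisfy it. A minimal async sketch; the connection string and names are placeholders:

```python
import asyncio
from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

async def main():
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="logs", blob_name="2020-08.log")
    async with blob:
        await blob.set_blob_tags({"status": "archived"})
        # The service evaluates the predicate server-side and answers
        # 412 (ConditionNotMet) if the tags do not match.
        await blob.delete_blob(if_tags_match_condition="\"status\"='archived'")

asyncio.run(main())
```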
@@ -1095,6 +1154,12 @@ async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :keyword lease: @@ -1104,12 +1169,14 @@ :rtype: None """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) if standard_blob_tier is None: raise ValueError("A StandardBlobTier must be specified") try: await self._client.blob.set_tier( tier=standard_blob_tier, timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, lease_access_conditions=access_conditions, **kwargs) except StorageErrorException as error: @@ -1245,18 +1312,26 @@ async def get_block_list(self, block_list_type="committed", **kwargs): Required if the blob has an active lease. Value can be a BlobLeaseClient object or the lease ID as a string. :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A tuple of two lists - committed and uncommitted blocks :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) try: blocks = await self._client.block_blob.get_block_list( list_type=block_list_type, snapshot=self.snapshot, timeout=kwargs.pop('timeout', None), lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, **kwargs) except StorageErrorException as error: process_storage_error(error) @@ -1319,6 +1394,12 @@ async def commit_block_list( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: A standard blob tier value to set the blob to. For this version of the library, this is only applicable to block blobs on standard storage accounts. @@ -1360,6 +1441,12 @@ async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to @@ -1371,6 +1458,7 @@ :rtype: None """ access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) if premium_page_blob_tier is None: raise ValueError("A PremiumPageBlobTier must be specified") try: @@ -1378,6 +1466,7 @@ tier=premium_page_blob_tier, timeout=kwargs.pop('timeout', None), lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, **kwargs) except StorageErrorException as error: process_storage_error(error) @@ -1409,6 +1498,9 @@ async def set_blob_tags(self, tags=None, **kwargs): bitflips on the wire if using http instead of https, as https (the default), will already validate. Note that this MD5 hash is not stored with the blob. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified) @@ -1431,6 +1523,9 @@ async def get_blob_tags(self, **kwargs): :keyword str version_id: The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to retrieve tags from. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Key value pairs of blob tags. @@ -1439,7 +1534,7 @@ options = self._get_blob_tags_options(**kwargs) try: _, tags = await self._client.blob.get_tags(**options) - return BlobProperties._parse_tags(tags) # pylint: disable=protected-access + return parse_tags(tags) except StorageErrorException as error: process_storage_error(error) @@ -1493,6 +1588,12 @@ async def get_page_ranges( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: @@ -1624,6 +1725,12 @@ async def set_sequence_number( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :returns: Blob-updated property dict (Etag and last modified). @@ -1668,6 +1775,12 @@ async def resize_blob(self, size, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on @@ -1742,6 +1855,12 @@ async def upload_page( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -1849,6 +1968,12 @@ async def upload_pages_from_url(self, source_url, # type: str and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -1923,6 +2048,12 @@ async def clear_page(self, offset, length, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -1992,6 +2123,12 @@ async def append_block( # type: ignore and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword str encoding: Defaults to UTF-8. :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: @@ -2074,6 +2211,12 @@ async def append_block_from_url(self, copy_source_url, # type: str and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~datetime.datetime source_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC.
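As the `get_tags` hunk above shows, `get_blob_tags` now deserializes through the module-level `parse_tags` helper and hands back a plain dict. A short sketch of the round trip, with placeholder names again:

```python
import asyncio
from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

async def main():
    async with BlobClient.from_connection_string(
            "<connection-string>", container_name="logs", blob_name="2020-08.log") as blob:
        tags = await blob.get_blob_tags()
        print(tags)  # e.g. {'status': 'archived'}

asyncio.run(main())
```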
@@ -2117,3 +2260,48 @@ async def append_block_from_url(self, copy_source_url, # type: str return await self._client.append_blob.append_block_from_url(**options) # type: ignore except StorageErrorException as error: process_storage_error(error) + + @distributed_trace_async() + async def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return await self._client.append_blob.seal(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py index 14944c5..ab2e8a0 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
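`seal_append_blob` is the other headline addition above: it makes an append blob read-only, after which further `append_block` calls fail. A minimal async sketch (the connection string and names are placeholders):

```python
import asyncio
from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

async def main():
    async with BlobClient.from_connection_string(
            "<connection-string>", container_name="logs", blob_name="audit.log") as blob:
        await blob.create_append_blob()
        await blob.append_block(b"final entry\n")
        resp = await blob.seal_append_blob()  # blob is now sealed/read-only
        print(resp.get("etag"), resp.get("last_modified"))

asyncio.run(main())
```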
# -------------------------------------------------------------------------- - +# pylint: disable=invalid-overridden-method import functools from typing import ( # pylint: disable=unused-import Union, Optional, Any, Iterable, Dict, List, diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py index 8a777cb..6217e99 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py @@ -4,7 +4,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- - +# pylint: disable=invalid-overridden-method import functools from typing import ( # pylint: disable=unused-import Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, @@ -33,8 +33,8 @@ from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name from .._lease import get_access_conditions -from .._models import ContainerProperties, BlobProperties, BlobType # pylint: disable=unused-import -from ._models import BlobPropertiesPaged, BlobPrefix +from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import +from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix from ._lease_async import BlobLeaseClient from ._blob_client_async import BlobClient @@ -658,6 +658,12 @@ async def upload_blob( and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to @@ -768,6 +774,12 @@ async def delete_blob( and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -827,6 +839,12 @@ async def download_blob(self, blob, offset=None, length=None, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: Encrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. @@ -886,6 +904,8 @@ async def delete_blobs( # pylint: disable=arguments-differ key: 'etag', value type: str match the etag or not: key: 'match_condition', value type: MatchConditions + tags match condition: + key: 'if_tags_match_condition', value type: str lease: key: 'lease_id', value type: Union[str, LeaseClient] timeout for subrequest: @@ -908,6 +928,12 @@ If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword bool raise_on_any_failure: This is a boolean param which defaults to True. When this is set, an exception is raised even if there is a single operation failure. For optimal performance, @@ -926,6 +952,9 @@ :dedent: 12 :caption: Deleting multiple blobs. """ + if len(blobs) == 0: + return iter(list()) + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) return await self._batch_send(*reqs, **options) @@ -969,12 +998,20 @@ async def set_standard_blob_tier_blobs( key: 'rehydrate_priority', value type: RehydratePriority lease: key: 'lease_id', value type: Union[str, LeaseClient] + tags match condition: + key: 'if_tags_match_condition', value type: str timeout for subrequest: key: 'timeout', value type: int :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :keyword bool raise_on_any_failure: diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py index ea83862..c698cb4 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method import asyncio import sys diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py index ecd9076..5f68a9b 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method from typing import ( # pylint: disable=unused-import Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, @@ -91,6 +92,12 @@ async def acquire(self, lease_duration=-1, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -108,7 +115,7 @@ process_storage_error(error) self.id = response.get('lease_id') # type: str self.last_modified = response.get('last_modified') # type: datetime - self.etag = kwargs.get('etag') # type: str + self.etag = response.get('etag') # type: str @distributed_trace_async async def renew(self, **kwargs): @@ -138,6 +145,12 @@ and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: None @@ -182,6 +195,12 @@ async def release(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: None @@ -225,6 +244,12 @@ async def change(self, proposed_lease_id, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: None @@ -278,6 +303,12 @@ async def break_lease(self, lease_break_period=None, **kwargs): If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value, + e.g. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: Approximate time remaining in the lease period, in seconds. diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py new file mode 100644 index 0000000..dc09846 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py @@ -0,0 +1,162 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- + +from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged +from .._deserialize import get_blob_properties_from_generated_code +from .._models import BlobProperties +from .._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix +from .._shared.models import DictMixin +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The container that the blobs are listed from. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefix(AsyncItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token of the current page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + async def _extract_data_cb(self, get_next_return): + continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py index 806c62c..44d5d63 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py +++ b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py @@ -6,17 +6,13 @@ # pylint: disable=too-few-public-methods, too-many-instance-attributes # pylint: disable=super-init-not-called, too-many-lines -from typing import List, Any, TYPE_CHECKING # pylint: disable=unused-import +from azure.core.async_paging import AsyncPageIterator -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged - -from .._models import BlobProperties, ContainerProperties, FilteredBlob +from .._models import ContainerProperties, FilteredBlob from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._shared.models import DictMixin from .._generated.models import StorageErrorException -from .._generated.models import BlobPrefix as GenBlobPrefix -from .._generated.models import BlobItemInternal, FilterBlobItem +from .._generated.models import FilterBlobItem class ContainerPropertiesPaged(AsyncPageIterator): @@ -77,90 +73,6 @@ def _build_item(item): return ContainerProperties._from_generated(item) # pylint: disable=protected-access -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. 
- :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = BlobProperties._from_generated(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - class FilteredBlobPaged(AsyncPageIterator): """An Iterable of Blob properties. @@ -227,68 +139,3 @@ def _build_item(item): blob = FilteredBlob(name=item.name, container_name=item.container_name, tag_value=item.tag_value) # pylint: disable=protected-access return blob return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. 
- :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py new file mode 100644 index 0000000..a86368c --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py @@ -0,0 +1,79 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from ._download import StorageStreamDownloader +from ._data_lake_file_client import DataLakeFileClient +from ._data_lake_directory_client import DataLakeDirectoryClient +from ._file_system_client import FileSystemClient +from ._data_lake_service_client import DataLakeServiceClient +from ._data_lake_lease import DataLakeLeaseClient +from ._models import ( + LocationMode, + ResourceTypes, + FileSystemProperties, + FileSystemPropertiesPaged, + DirectoryProperties, + FileProperties, + PathProperties, + PathPropertiesPaged, + LeaseProperties, + ContentSettings, + AccountSasPermissions, + FileSystemSasPermissions, + DirectorySasPermissions, + FileSasPermissions, + UserDelegationKey, + PublicAccess, + AccessPolicy, + DelimitedTextDialect, + DelimitedJsonDialect, + DataLakeFileQueryError +) +from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ + generate_file_sas + +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import StorageErrorCode +from ._version import VERSION + +__version__ = VERSION + +__all__ = [ + 'DataLakeServiceClient', + 'FileSystemClient', + 'DataLakeFileClient', + 'DataLakeDirectoryClient', + 'DataLakeLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'PublicAccess', + 'AccessPolicy', + 'ResourceTypes', + 'StorageErrorCode', + 'UserDelegationKey', + 'FileSystemProperties', + 'FileSystemPropertiesPaged', + 'DirectoryProperties', + 'FileProperties', + 'PathProperties', + 'PathPropertiesPaged', + 'LeaseProperties', + 'ContentSettings', + 'AccountSasPermissions', + 'FileSystemSasPermissions', + 'DirectorySasPermissions', + 'FileSasPermissions', + 'generate_account_sas', + 'generate_file_system_sas', + 'generate_directory_sas', + 'generate_file_sas', + 'VERSION', + 'StorageStreamDownloader', + 'DelimitedTextDialect', + 'DelimitedJsonDialect', + 'DataLakeFileQueryError' +] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py new file mode 100644 index 0000000..90c525b --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py @@ -0,0 +1,523 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from ._shared.base_client import parse_connection_str +from ._data_lake_file_client import DataLakeFileClient +from ._models import DirectoryProperties +from ._path_client import PathClient + + +class DataLakeDirectoryClient(PathClient): + """A client to interact with the DataLake directory, even if the directory may not yet exist. + + For operations relating to a specific subdirectory or file under the directory, a directory client or file client + can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. 
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}.
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+                                                      credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            directory_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeDirectoryClient
+        """
+        Create DataLakeDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of the file system to interact with.
+        :type file_system_name: str
+        :param directory_name:
+            The name of the directory to interact with. The directory is under the file system.
+        :type directory_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: a DataLakeDirectoryClient
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, directory_name=directory_name,
+            credential=credential, **kwargs)
+
+    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 8 + :caption: Create directory. + """ + return self._create('directory', metadata=metadata, **kwargs) + + def delete_directory(self, **kwargs): + # type: (...) -> None + """ + Marks the specified directory for deletion. + + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return self._delete(**kwargs)
+
+    def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        blob_properties = self._get_path_properties(**kwargs)
+        return DirectoryProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
+
+    def rename_directory(self, new_name,  # type: str
+                         **kwargs):
+        # type: (**Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            the new directory name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START rename_directory] + :end-before: [END rename_directory] + :language: python + :dedent: 4 + :caption: Rename the source directory. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + path = new_name[len(new_file_system):] + + new_directory_client = DataLakeDirectoryClient( + self.url, new_file_system, directory_name=path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name, # pylint: disable=protected-access + **kwargs) + return new_directory_client + + def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. 
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory. + """ + subdir = self.get_sub_directory_client(sub_directory) + subdir.create_directory(metadata=metadata, **kwargs) + return subdir + + def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified subdirectory for deletion. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory + """ + subdir = self.get_sub_directory_client(sub_directory) + subdir.delete_directory(**kwargs) + return subdir + + def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create a new file and return the file client to be interacted with. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
+
+    def get_file_client(self, file  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties. eg. directory/subdirectory/file
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        try:
+            file_path = file.name
+        except AttributeError:
+            file_path = self.path_name + '/' + file
+
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        try:
+            subdir_path = sub_directory.name
+        except AttributeError:
+            subdir_path = self.path_name + '/' + sub_directory
+
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py
new file mode 100644
index 0000000..db076b8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py
@@ -0,0 +1,708 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from io import BytesIO
+import six
+
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._upload_helper import upload_datalake_file
+from ._generated.models import StorageErrorException
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers
+from ._deserialize import process_storage_error
+from ._models import FileProperties, DataLakeFileQueryError
+
+
+class DataLakeFileClient(PathClient):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file.
+        eg. "{directory}/{subdirectory}/{file}"
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        file_path,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            file_path,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeFileClient
+        """
+        Create DataLakeFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of the file system to interact with.
+        :type file_system_name: str
+        :param file_path: The whole file path, e.g. "{directory}/{subdirectory}/{file}".
+        :type file_path: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: a DataLakeFileClient
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, file_path=file_path,
+            credential=credential, **kwargs)
+
+    def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                    metadata=None,  # type: Optional[Dict[str, str]]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
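+
+        A minimal inline sketch (an editor's illustration rather than one of the
+        shipped samples; the account URL, credential, and path names below are
+        assumed placeholders):
+
+        .. code-block:: python
+
+            # Hypothetical account details, for illustration only.
+            file_client = DataLakeFileClient(
+                "https://myaccount.dfs.core.windows.net",
+                file_system_name="myfilesystem",
+                file_path="mydir/myfile.txt",
+                credential="<account-shared-key>")
+            file_client.create_file(permissions="rwxr-x---", umask="0027")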
+ """ + return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) + + def delete_file(self, **kwargs): + # type: (...) -> None + """ + Marks the specified file for deletion. + + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 4 + :caption: Delete file. + """ + return self._delete(**kwargs) + + def get_file_properties(self, **kwargs): + # type: (**Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. It does not return the content of the file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: FileProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        blob_properties = self._get_path_properties(**kwargs)
+        return FileProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
+
+    def _upload_options(  # pylint:disable=too-many-statements
+            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if isinstance(data, six.text_type):
+            data = data.encode(encoding)  # type: ignore
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, 'read'):
+            stream = data
+        elif hasattr(data, '__iter__'):
+            stream = IterStreamer(data, encoding=encoding)
+        else:
+            raise TypeError("Unsupported data type: {}".format(type(data)))
+
+        validate_content = kwargs.pop('validate_content', False)
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+
+        kwargs['properties'] = add_metadata_headers(metadata)
+        kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+        kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
+
+        if content_settings:
+            kwargs['path_http_headers'] = get_path_http_headers(content_settings)
+
+        kwargs['stream'] = stream
+        kwargs['length'] = length
+        kwargs['validate_content'] = validate_content
+        kwargs['max_concurrency'] = max_concurrency
+        kwargs['client'] = self._client.path
+
+        return kwargs
+
+    def upload_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+                    length=None,  # type: Optional[int]
+                    overwrite=False,  # type: Optional[bool]
+                    **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to the file.
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the blob as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :return: response dict (Etag and last modified).
+        """
+        options = self._upload_options(
+            data,
+            length=length,
+            overwrite=overwrite,
+            **kwargs)
+        return upload_datalake_file(**options)
+
+    @staticmethod
+    def _append_data_options(data, offset, length=None, **kwargs):
+        # type: (Union[AnyStr, Iterable[AnyStr], IO[AnyStr]], int, Optional[int], **Any) -> Dict[str, Any]
+
+        if isinstance(data, six.text_type):
+            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+        if length is None:
+            length = get_length(data)
+            if length is None:
+                length, data = read_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        options = {
+            'body': data,
+            'position': offset,
+            'content_length': length,
+            'lease_access_conditions': access_conditions,
+            'validate_content': kwargs.pop('validate_content', False),
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def append_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+                    offset,  # type: int
+                    length=None,  # type: Optional[int]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Append data to the file.
+
+        :param data: Content to be appended to the file.
+        :param offset: Start position at which the data is to be appended.
+        :param length: Size of the data in bytes.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :return: dict of the response header
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START append_data]
+                :end-before: [END append_data]
+                :language: python
+                :dedent: 4
+                :caption: Append data to the file.
+        """
+        options = self._append_data_options(
+            data,
+            offset,
+            length=length,
+            **kwargs)
+        try:
+            return self._client.path.append_data(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
+        # type: (int, Optional[ContentSettings], Optional[bool], **Any) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        options = {
+            'position': offset,
+            'content_length': 0,
+            'path_http_headers': path_http_headers,
+            'retain_uncommitted_data': retain_uncommitted_data,
+            'close': kwargs.pop('close', False),
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def flush_data(self, offset,  # type: int
+                   retain_uncommitted_data=False,  # type: Optional[bool]
+                   **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """ Commit the previously appended data.
+
+        :param offset: offset is equal to the length of the file after committing the
+            previously appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations. If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation. The default is false. Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :return: response header in dict + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START upload_file_to_file_system] + :end-before: [END upload_file_to_file_system] + :language: python + :dedent: 8 + :caption: Commit the previous appended data. + """ + options = self._flush_data_options( + offset, + retain_uncommitted_data=retain_uncommitted_data, **kwargs) + try: + return self._client.path.flush_data(**options) + except StorageErrorException as error: + process_storage_error(error) + + def download_file(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a file to the StorageStreamDownloader. The readall() method must + be used to read all the content, or readinto() must be used to download the file into + a stream. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword lease: + If specified, download only succeeds if the file's lease is active + and matches this ID. Required if the file has an active lease. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.filedatalake.StorageStreamDownloader + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
+        """
+        downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
+
+    def rename_file(self, new_name,  # type: str
+                    **kwargs):
+        # type: (**Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: the new file name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: the renamed file client
+        :rtype: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START rename_file]
+                :end-before: [END rename_file]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source file.
+        """
+        new_name = new_name.strip('/')
+        new_file_system = new_name.split('/')[0]
+        path = new_name[len(new_file_system):]
+
+        new_file_client = DataLakeFileClient(
+            self.url, new_file_system, file_path=path, credential=self._raw_credential,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+        new_file_client._rename_path('/'+self.file_system_name+'/'+self.path_name,  # pylint: disable=protected-access
+                                     **kwargs)
+        return new_file_client
+
+    def query_file(self, query_expression, **kwargs):
+        # type: (str, **Any) -> DataLakeFileQueryReader
+        """Enables users to select/project on datalake file data by providing simple query expressions.
+        This operation returns a DataLakeFileQueryReader; users need to use readall() or readinto() to get query data.
+
+        :param str query_expression:
+            Required. A query statement.
+            eg. Select * from DataLakeStorage
+        :keyword Callable[Exception] on_error:
+            A function to be called on any processing errors returned by the service.
+        :keyword file_format:
+            Optional. Defines the serialization of the data currently stored in the file. The default is to
+            treat the file data as CSV data formatted in the default dialect. This can be overridden with
+            a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect.
+        :paramtype file_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
+        :keyword output_format:
+            Optional. Defines the output serialization for the data stream. By default the data will be returned
+            as it is represented in the file. By providing an output format, the file data will be reformatted
+            according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect.
+        :paramtype output_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py new file mode 100644 index 0000000..d896ccb --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py @@ -0,0 +1,245 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, + TypeVar, TYPE_CHECKING +) +from azure.multiapi.storagev2.blob.v2019_12_12 import BlobLeaseClient + + +if TYPE_CHECKING: + from datetime import datetime + FileSystemClient = TypeVar("FileSystemClient") + DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") + DataLakeFileClient = TypeVar("DataLakeFileClient") + + +class DataLakeLeaseClient(object): + """Creates a new DataLakeLeaseClient. + + This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file system, directory, or file to lease. + :type client: ~azure.storage.filedatalake.FileSystemClient or + ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one.
+ """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + + if hasattr(client, '_blob_client'): + _client = client._blob_client # type: ignore # pylint: disable=protected-access + elif hasattr(client, '_container_client'): + _client = client._container_client # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") + + self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Optional[int], **Any) -> None + """Requests a new lease. + + If the file/file system does not have an active lease, the DataLake service creates a + lease on the file/file system and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) + self._update_lease_client_attributes() + + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the file system or file. Note that + the lease may be renewed even if it has expired as long as the file system + or file has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) + self._update_lease_client_attributes() + + def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the file system or file has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the file system or file. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration, in seconds, that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) + + def _update_lease_client_attributes(self): + self.id = self._blob_lease_client.id # type: str + self.last_modified = self._blob_lease_client.last_modified # type: datetime + self.etag = self._blob_lease_client.etag # type: str
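Since DataLakeLeaseClient defines __enter__/__exit__, it can be driven as a context manager. A short sketch under the assumption that file_system_client is an existing FileSystemClient (its acquire_lease helper appears later in this patch):

    # Hold a 30-second lease for the duration of the block; __exit__ releases it.
    lease = file_system_client.acquire_lease(lease_duration=30)
    with lease:
        # Operations that require the lease pass it explicitly.
        props = file_system_client.get_file_system_properties(lease=lease)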
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py new file mode 100644 index 0000000..acb8ca1 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py @@ -0,0 +1,421 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.paging import ItemPaged + +from azure.multiapi.storagev2.blob.v2019_12_12 import BlobServiceClient +from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str +from ._file_system_client import FileSystemClient +from ._data_lake_directory_client import DataLakeDirectoryClient +from ._data_lake_file_client import DataLakeFileClient +from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode +from ._serialize import convert_dfs_url_to_blob_url + + +class DataLakeServiceClient(StorageAccountHostsMixin): + """A client to interact with the DataLake Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete file systems within the account. + For operations relating to a specific file system, directory or file, clients for those entities + can also be retrieved using the `get_client` functions. + + :ivar str url: + The full endpoint URL to the datalake service endpoint. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URL to the DataLake storage account. Any other entities included + in the URL path (e.g. file system or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient from connection string. + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...)
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + blob_account_url = convert_dfs_url_to_blob_url(account_url) + self._blob_account_url = blob_account_url + self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) + self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access + + _, sas_token = parse_query(parsed_url.query) + self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) + + super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', + credential=self._raw_credential, **kwargs) + # ADLS doesn't support secondary endpoint, make sure it's empty + self._hosts[LocationMode.SECONDARY] = "" + + def __exit__(self, *args): + self._blob_service_client.close() + super(DataLakeServiceClient, self).__exit__(*args) + + def close(self): + # type: () -> None + """ This method closes the sockets opened by the client. + It need not be used when the client is used as a context manager. + """ + self._blob_service_client.close() + self.__exit__() + + def _format_url(self, hostname): + """Format the endpoint URL according to the hostname. + """ + formatted_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) + return formatted_url + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> DataLakeServiceClient + """ + Create DataLakeServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :return: A DataLakeServiceClient. + :rtype: ~azure.storage.filedatalake.DataLakeServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_data_lake_service_client_from_conn_str] + :end-before: [END create_data_lake_service_client_from_conn_str] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient from a connection string. + """ + account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + return cls(account_url, credential=credential, **kwargs)
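A minimal construction sketch for the client above; the account URL, credential placeholder, and environment variable name are illustrative only.

    import os
    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import DataLakeServiceClient

    # Construct directly from the dfs endpoint plus a credential...
    service = DataLakeServiceClient(
        "https://myaccount.dfs.core.windows.net",
        credential="<sas-token-or-account-key>")
    # ...or from a connection string, via from_connection_string shown above.
    service = DataLakeServiceClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"])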
+ + def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.filedatalake.UserDelegationKey + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_user_delegation_key] + :end-before: [END get_user_delegation_key] + :language: python + :dedent: 8 + :caption: Get user delegation key from datalake service client. + """ + delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, + key_expiry_time=key_expiry_time, + **kwargs) # pylint: disable=protected-access + return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access + + def list_file_systems(self, name_starts_with=None, # type: Optional[str] + include_metadata=None, # type: Optional[bool] + **kwargs): + # type: (...) -> ItemPaged[FileSystemProperties] + """Returns a generator to list the file systems under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all file systems have been returned. + + :param str name_starts_with: + Filters the results to return only file systems whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that file system metadata be returned in the response. + The default value is `False`. + :keyword int results_per_page: + The maximum number of file system names to retrieve per API + call. If the request does not specify a value, the server will return up to 5,000 items per page. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of FileSystemProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START list_file_systems] + :end-before: [END list_file_systems] + :language: python + :dedent: 8 + :caption: Listing the file systems in the datalake service. + """ + item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, + include_metadata=include_metadata, + **kwargs) # pylint: disable=protected-access + item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access + return item_paged
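A sketch of paging through the generator returned above, assuming `service` is the DataLakeServiceClient from the earlier sketch.

    # Each service call asks for at most 100 items; iteration is lazy and
    # follows continuation tokens automatically.
    for fs in service.list_file_systems(name_starts_with="logs",
                                        include_metadata=True,
                                        results_per_page=100):
        print(fs.name, fs.metadata)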
+ """ + file_system_client = self.get_file_system_client(file_system) + file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) + return file_system_client + + def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] + **kwargs): + # type: (...) -> FileSystemClient + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :param file_system: + The file system to delete. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START delete_file_system_from_service_client] + :end-before: [END delete_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Deleting a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + file_system_client.delete_file_system(**kwargs) + return file_system_client + + def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] + ): + # type: (...) -> FileSystemClient + """Get a client to interact with the specified file system. + + The file system need not already exist. + + :param file_system: + The file system. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :returns: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. 
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + _configuration=self._config, + _pipeline=self._pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param file_system: + The file system that the directory is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_directory_client_from_service_client] + :end-before: [END get_directory_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + credential=self._raw_credential, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] + file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_system: + The file system that the file is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param file_path: + The file with which to interact. This can either be the full path of the file(from the root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake..DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_file_client_from_service_client] + :end-before: [END get_file_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the file client to interact with a specific file. 
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py new file mode 100644 index 0000000..9d0881a --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py @@ -0,0 +1,106 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +from typing import ( # pylint: disable=unused-import + TYPE_CHECKING +) + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ + ResourceNotFoundError, ResourceExistsError +from ._shared.models import StorageErrorCode + +if TYPE_CHECKING: + pass + +_LOGGER = logging.getLogger(__name__) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = value + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_headers_and_deserialized_path_list(response, deserialized, response_headers): # pylint: disable=unused-argument + return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers) + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body: + if info == 'code': + error_code = error_body[info] + elif info == 'message': + error_message = error_body[info] + else: + additional_data[info] = error_body[info] + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.invalid_property_name, + StorageErrorCode.invalid_source_uri, + StorageErrorCode.source_path_not_found, + StorageErrorCode.lease_name_mismatch, + StorageErrorCode.file_system_not_found, + StorageErrorCode.path_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.invalid_destination_path, + StorageErrorCode.invalid_rename_source_path, + 
StorageErrorCode.lease_is_already_broken, + StorageErrorCode.invalid_source_or_destination_resource_type, + StorageErrorCode.rename_destination_parent_path_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.source_path_is_being_deleted, + StorageErrorCode.path_already_exists, + StorageErrorCode.destination_path_is_being_deleted, + StorageErrorCode.file_system_already_exists, + StorageErrorCode.file_system_being_deleted, + StorageErrorCode.path_conflict]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message += "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py new file mode 100644 index 0000000..181b503 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py @@ -0,0 +1,53 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._models import FileProperties + + +class StorageStreamDownloader(object): + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the file being downloaded. + :ivar ~azure.storage.filedatalake.FileProperties properties: + The properties of the file being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if specified, + otherwise the total size of the file. + """ + + def __init__(self, downloader): + self._downloader = downloader + self.name = self._downloader.name + self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access + self.size = self._downloader.size + + def __len__(self): + return self.size + + def chunks(self): + return self._downloader.chunks() + + def readall(self): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes or str + """ + return self._downloader.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + return self._downloader.readinto(stream)
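A sketch of the two read paths on the downloader above: readall() buffers the whole payload in memory, while readinto() streams into a writable stream. Assumes `downloader` is a StorageStreamDownloader obtained from a file client; the output filename is illustrative.

    # Stream the download straight to disk without holding it all in memory.
    with open("out.bin", "wb") as stream:
        bytes_read = downloader.readinto(stream)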
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py new file mode 100644 index 0000000..51e6cbd --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py @@ -0,0 +1,782 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import functools + +try: + from urllib.parse import urlparse, quote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote # type: ignore + +import six +from azure.core.paging import ItemPaged +from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerClient +from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str +from ._serialize import convert_dfs_url_to_blob_url +from ._models import LocationMode, FileSystemProperties, PathPropertiesPaged, PublicAccess +from ._data_lake_file_client import DataLakeFileClient +from ._data_lake_directory_client import DataLakeDirectoryClient +from ._data_lake_lease import DataLakeLeaseClient +from ._generated import DataLakeStorageClient + + +class FileSystemClient(StorageAccountHostsMixin): + """A client to interact with a specific file system, even if that file system + may not yet exist. + + For operations relating to a specific directory or file within this file system, a directory client or file client + can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. + :type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + """ + def __init__( + self, account_url, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...)
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not file_system_name: + raise ValueError("Please specify a file system name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + blob_account_url = convert_dfs_url_to_blob_url(account_url) + # TODO: add self.account_url to base_client and remove _blob_account_url + self._blob_account_url = blob_account_url + + datalake_hosts = kwargs.pop('_hosts', None) + blob_hosts = None + if datalake_hosts: + blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) + blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} + self._container_client = ContainerClient(blob_account_url, file_system_name, + credential=credential, _hosts=blob_hosts, **kwargs) + + _, sas_token = parse_query(parsed_url.query) + self.file_system_name = file_system_name + self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) + + super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, + _hosts=datalake_hosts, **kwargs) + # ADLS doesn't support secondary endpoint, make sure it's empty + self._hosts[LocationMode.SECONDARY] = "" + self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) + + def _format_url(self, hostname): + file_system_name = self.file_system_name + if isinstance(file_system_name, six.text_type): + file_system_name = file_system_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(file_system_name), + self._query_str) + + def __exit__(self, *args): + self._container_client.close() + super(FileSystemClient, self).__exit__(*args) + + def close(self): + # type: () -> None + """ This method closes the sockets opened by the client. + It need not be used when the client is used as a context manager. + """ + self._container_client.close() + self.__exit__() + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> FileSystemClient + """ + Create FileSystemClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param file_system_name: The name of file system to interact with. + :type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :return: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_connection_string] + :end-before: [END create_file_system_client_from_connection_string] + :language: python + :dedent: 8 + :caption: Create FileSystemClient from connection string + """ + account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + return cls( + account_url, file_system_name=file_system_name, credential=credential, **kwargs)
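A construction sketch for the classmethod above; the environment variable and file system name are placeholders.

    import os
    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],
        file_system_name="my-file-system")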
+ + def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> DataLakeLeaseClient + """ + Requests a new lease. If the file system does not have an active lease, + the DataLake service creates a lease on the file system and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A DataLakeLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the file system. + """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) + lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new file system under the specified account. + + If a file system with the same name already exists, a ResourceExistsError will + be raised. + + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata.
Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File system-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system] + :end-before: [END create_file_system] + :language: python + :dedent: 12 + :caption: Creating a file system in the datalake service. + """ + return self._container_client.create_container(metadata=metadata, + public_access=public_access, + **kwargs) + + def delete_file_system(self, **kwargs): + # type: (Any) -> None + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START delete_file_system] + :end-before: [END delete_file_system] + :language: python + :dedent: 12 + :caption: Deleting a file system in the datalake service. + """ + self._container_client.delete_container(**kwargs) + + def get_file_system_properties(self, **kwargs): + # type: (Any) -> FileSystemProperties + """Returns all user-defined metadata and system properties for the specified + file system. The data returned does not include the file system's list of paths. + + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, get_file_system_properties only succeeds if the + file system's lease is active and matches this ID. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified file system within a file system object. + :rtype: ~azure.storage.filedatalake.FileSystemProperties + + .. admonition:: Example: + + ..
literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START get_file_system_properties] + :end-before: [END get_file_system_properties] + :language: python + :dedent: 12 + :caption: Getting properties on the file system. + """ + container_properties = self._container_client.get_container_properties(**kwargs) + return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access + + def set_file_system_metadata( # type: ignore + self, metadata, # type: Dict[str, str] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + file system. Each call to this operation replaces all existing metadata + attached to the file system. To remove all metadata from the file system, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the file system as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, set_file_system_metadata only succeeds if the + file system's lease is active and matches this ID. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: filesystem-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START set_file_system_metadata] + :end-before: [END set_file_system_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the file system. + """ + return self._container_client.set_container_metadata(metadata=metadata, **kwargs) + + def set_file_system_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified file system or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a file system may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the file system. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. 
+ :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] + :param ~azure.storage.filedatalake.PublicAccess public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :keyword lease: + Required if the file system has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File System-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + """ + return self._container_client.set_container_access_policy(signed_identifiers, + public_access=public_access, **kwargs) + + def get_file_system_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified file system. + The permissions indicate whether file system data may be accessed publicly. + + :keyword lease: + If specified, the operation only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_policy = self._container_client.get_container_access_policy(**kwargs) + return { + 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access + 'signed_identifiers': access_policy['signed_identifiers'] + } + + def get_paths(self, path=None, # type: Optional[str] + recursive=True, # type: Optional[bool] + max_results=None, # type: Optional[int] + **kwargs): + # type: (...) -> ItemPaged[PathProperties] + """Returns a generator to list the paths (files or directories) under the specified file system. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str path: + Filters the results to return only paths under the specified path. + :param bool recursive: Optional. Set True to list paths recursively. The default is True. + :param int max_results: An optional value that specifies the maximum + number of items to return per page. If omitted or greater than 5,000, the + response will include up to 5,000 items per page. + :keyword upn: + Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of PathProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START get_paths_in_file_system] + :end-before: [END get_paths_in_file_system] + :language: python + :dedent: 8 + :caption: List the paths in the file system. + """ + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.file_system.list_paths, + path=path, + timeout=timeout, + **kwargs) + return ItemPaged( + command, recursive, path=path, max_results=max_results, + page_iterator_class=PathPropertiesPaged, **kwargs)
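A sketch of listing with get_paths above, assuming `fs_client` is an existing FileSystemClient and the path prefix is illustrative; PathProperties exposes at least name and is_directory.

    # Recursively list everything under "raw/2020", resolving user object IDs
    # to User Principal Names where the service can.
    for p in fs_client.get_paths(path="raw/2020", upn=True):
        print(p.name, p.is_directory)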
+
+    def get_paths(self, path=None,  # type: Optional[str]
+                  recursive=True,  # type: Optional[bool]
+                  max_results=None,  # type: Optional[int]
+                  **kwargs):
+        # type: (...) -> ItemPaged[PathProperties]
+        """Returns a generator to list the paths (files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive:
+            Optional. Set True to list paths recursively; set False to list only
+            the paths directly under the specified path. Defaults to True.
+        :param int max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :paramtype upn: bool
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 8
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
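Because get_paths returns an auto-paging ItemPaged, plain iteration is enough to walk every page; a short sketch (directory name is a placeholder):

    # Walk everything under "mydirectory", up to 1,000 items per service round-trip.
    for p in file_system.get_paths(path="mydirectory", recursive=True, max_results=1000):
        print(p.name, "dir" if p.is_directory else "file")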
+
+    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a new directory.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict[str, str]
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ~u (p AND NOT u), where p is the permission
+            and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
+
+    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified directory for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.delete_directory(**kwargs)
+        return directory_client
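A sketch pairing the two directory helpers above (placeholder names; metadata is forwarded to create_directory on the returned directory client):

    dir_client = file_system.create_directory("mydirectory", metadata={"owner": "team-a"})
    file_system.create_directory("mydirectory/subdir")
    file_system.delete_directory("mydirectory/subdir")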
+
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ~u (p AND NOT u), where p is the permission
+            and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
+
+    def delete_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.delete_file(**kwargs)
+        return file_client
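And the file-level counterparts (placeholder path; permissions and umask only take effect on accounts with Hierarchical Namespace enabled, and are routed through **kwargs to the file client's create_file):

    file_client = file_system.create_file("mydirectory/hello.txt",
                                          permissions="0766", umask="0027")
    file_system.delete_file("mydirectory/hello.txt")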
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       _configuration=self._config, _pipeline=self._pipeline,
+                                       _hosts=self._hosts,
+                                       require_encryption=self.require_encryption,
+                                       key_encryption_key=self.key_encryption_key,
+                                       key_resolver_function=self.key_resolver_function
+                                       )
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file
+            (from the root directory), e.g. 'directory/subdirectory/file', or an instance
+            of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.name
+        except AttributeError:
+            pass
+
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
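Both getters are purely local, as the constructors above show; a sketch (placeholder paths):

    dir_client = file_system.get_directory_client("mydirectory")
    file_client = file_system.get_file_client("mydirectory/subdirectory/file")
    # Neither path needs to exist yet: no service call is made, and the new
    # clients share this client's pipeline, hosts, and encryption settings.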
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py
new file mode 100644
index 0000000..2c90133
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py
@@ -0,0 +1,18 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._data_lake_storage_client import DataLakeStorageClient
+__all__ = ['DataLakeStorageClient']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py
new file mode 100644
index 0000000..5fc3466
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py
@@ -0,0 +1,64 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from .version import VERSION
+
+
+class DataLakeStorageClientConfiguration(Configuration):
+    """Configuration for DataLakeStorageClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    :ivar resource: The value must be "filesystem" for all filesystem
+     operations.
+    :type resource: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+    :type version: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+        if file_system is None:
+            raise ValueError("Parameter 'file_system' must not be None.")
+
+        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
+        self._configure(**kwargs)
+
+        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
+        self.generate_client_request_id = True
+
+        self.url = url
+        self.file_system = file_system
+        self.path1 = path1
+        self.resource = "filesystem"
+        self.version = "2019-12-12"
+
+    def _configure(self, **kwargs):
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
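Direct construction of this generated configuration is unusual (the hand-written clients build it internally), but the _configure hook above means any pipeline policy can be overridden through kwargs; a hypothetical sketch:

    from azure.core.pipeline import policies

    config = DataLakeStorageClientConfiguration(
        url="https://myaccount.dfs.core.windows.net",   # placeholder account URL
        file_system="myfs", path1="dir/file.txt",       # placeholder identifiers
        retry_policy=policies.RetryPolicy(retry_total=3))  # replaces the default RetryPolicy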
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py
new file mode 100644
index 0000000..dcc65ad
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py
@@ -0,0 +1,67 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import PipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from . import models
+
+
+class DataLakeStorageClient(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+    :ivar file_system: FileSystem operations
+    :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+    :ivar path: Path operations
+    :vartype path: azure.storage.filedatalake.operations.PathOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        base_url = '{url}'
+        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2019-12-12'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        self._client.close()
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+    def __exit__(self, *exc_details):
+        self._client.__exit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py
new file mode 100644
index 0000000..5f09159
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._data_lake_storage_client_async import DataLakeStorageClient
+__all__ = ['DataLakeStorageClient']
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py
new file mode 100644
index 0000000..5aaa28b
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py
@@ -0,0 +1,63 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from ..version import VERSION
+
+
+class DataLakeStorageClientConfiguration(Configuration):
+    """Configuration for DataLakeStorageClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    :ivar resource: The value must be "filesystem" for all filesystem
+     operations.
+    :type resource: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+    :type version: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
+        self._configure(**kwargs)
+
+        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
+        self.generate_client_request_id = True
+        self.accept_language = None
+
+        self.url = url
+        self.file_system = file_system
+        self.path1 = path1
+        self.resource = "filesystem"
+        self.version = "2019-12-12"
+
+    def _configure(self, **kwargs):
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
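This async configuration differs from the sync one only in swapping RetryPolicy and RedirectPolicy for their Async variants; overriding works the same way, e.g. (hypothetical values):

    from azure.core.pipeline import policies

    config = DataLakeStorageClientConfiguration(
        url="https://myaccount.dfs.core.windows.net",  # placeholder
        file_system="myfs", path1="dir/file.txt",
        retry_policy=policies.AsyncRetryPolicy(retry_total=5))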
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py
new file mode 100644
index 0000000..929fece
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import AsyncPipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration_async import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations_async import ServiceOperations
+from .operations_async import FileSystemOperations
+from .operations_async import PathOperations
+from .. import models
+
+
+class DataLakeStorageClient(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations
+    :ivar file_system: FileSystem operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations
+    :ivar path: Path operations
+    :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    """
+
+    def __init__(
+            self, url, file_system, path1, **kwargs):
+
+        base_url = '{url}'
+        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2019-12-12'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self):
+        await self._client.close()
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+    async def __aexit__(self, *exc_details):
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py
new file mode 100644
index 0000000..1190e52
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations_async import ServiceOperations
+from ._file_system_operations_async import FileSystemOperations
+from ._path_operations_async import PathOperations
+
+__all__ = [
+    'ServiceOperations',
+    'FileSystemOperations',
+    'PathOperations',
+]
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py
new file mode 100644
index 0000000..f1af068
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py
@@ -0,0 +1,462 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class FileSystemOperations:
+    """FileSystemOperations async operations.
+
+    You should not instantiate this class directly; instead, create a client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs): + """Create FileSystem. + + Create a FileSystem rooted at the specified location. If the FileSystem + already exists, the operation fails. This operation does not support + conditional HTTP requests. + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}'} + + async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional + HTTP requests. For more information, see [Specifying Conditional + Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{filesystem}'} + + async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs): + """Get FileSystem Properties. + + All system and user-defined filesystem properties are specified in the + response headers. + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. 
For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}'} + + async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a + FileSystem with the same identifier cannot be created for at least 30 + seconds. While the filesystem is being deleted, attempts to create a + filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information + indicating that the filesystem is being deleted. All other operations, + including operations on any files or directories within the filesystem, + will fail with status code 404 (Not Found) while the filesystem is + being deleted. This operation supports conditional HTTP requests. For + more information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). 
+ + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}'} + + async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs): + """List Paths. + + List FileSystem paths and their properties. + + :param recursive: Required + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. 
If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param path: Optional. Filters results to paths within the specified + directory. An error occurs if the directory does not exist. + :type path: str + :param max_results: An optional value that specifies the maximum + number of items to return. If omitted or greater than 5,000, the + response will include up to 5,000 items. + :type max_results: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: PathList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.PathList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_paths.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if path is not None: + query_parameters['directory'] = self._serialize.query("path", path, 'str') + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('PathList', response)
+            header_dict = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_paths.metadata = {'url': '/{filesystem}'}
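Driving list_paths by hand shows how the x-ms-continuation response header pairs with the cls hook that every operation above exposes; a sketch assuming an async DataLakeStorageClient named client:

    async def list_all_paths(client):
        continuation = None
        while True:
            # cls receives (raw response, deserialized PathList, header dict)
            path_list, headers = await client.file_system.list_paths(
                recursive=True, continuation=continuation,
                cls=lambda resp, deserialized, hdrs: (deserialized, hdrs))
            for p in path_list.paths:
                print(p.name)
            continuation = headers.get('x-ms-continuation')
            if not continuation:
                return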
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py
new file mode 100644
index 0000000..0e8a109
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py
@@ -0,0 +1,1600 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class PathOperations:
+    """PathOperations async operations.
+
+    You should not instantiate this class directly; instead, create a client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+
+    async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory. By default, the destination is
+        overwritten; if the destination already exists and has a lease, the
+        lease is broken. This operation supports conditional HTTP requests.
+        For more information, see [Specifying Conditional Headers for Blob
+        Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+        To fail if the destination already exists, use a conditional request
+        with If-None-Match: "*".
+
+        :param resource: Required only for Create File and Create Directory.
+         The value must be "file" or "directory". Possible values include:
+         'directory', 'file'
+        :type resource: str or
+         ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional. When deleting a directory, the number
+         of paths that are deleted with each invocation is limited. If the
+         number of paths to be deleted exceeds this limit, a continuation token
+         is returned in this response header. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the delete operation to continue deleting the directory.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This
+         parameter determines the behavior of the rename operation. The value
+         must be "legacy" or "posix", and the default value will be "posix".
+         Possible values include: 'legacy', 'posix'
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.
+         The value must have the following format: "/{filesystem}/{path}". If
+         "x-ms-properties" is specified, the properties will overwrite the
+         existing properties; otherwise, the existing properties will be
+         preserved. This value must be a URL percent-encoded string. Note that
+         the string may only contain ASCII characters in the ISO-8859-1
+         character set.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must
+         match.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with
+         the filesystem, in the format of a comma-separated list of name and
+         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+         string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties
+         not included in the list will be removed. All properties are removed
+         if the header is omitted. To merge new and existing properties, first
+         get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all
+         properties.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission. The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is
+         enabled for the account. When creating a file or directory and the
+         parent folder does not have a default ACL, the umask restricts the
+         permissions of the file or directory to be created. The resulting
+         permission is given by p bitwise and not u, where p is the permission
+         and u is the umask. For example, if p is 0777 and u is 0057, then the
+         resulting permission is 0720. The default permission is 0777 for a
+         directory and 0666 for a file. The default umask is 0027. The umask
+         must be specified in 4-digit octal notation (e.g. 0766).
+        :type umask: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not 
None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}/{path}'} + + async def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously + uploaded data to a file, sets properties for a file or directory, or + sets access control for a file or directory. Data can only be appended + to a file. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: The action must be "append" to upload data to be + appended to a file, "flush" to flush previously uploaded data to a + file, "setProperties" to set the properties of a file or directory, + "setAccessControl" to set the owner, group, permissions, or access + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes + permissions for the owner, owning group, and others, so the + x-ms-permissions and x-ms-acl request headers are mutually exclusive. + Possible values include: 'append', 'flush', 'setProperties', + 'setAccessControl', 'setAccessControlRecursive' + :type action: str or + ~azure.storage.filedatalake.models.PathUpdateAction + :param body: Initial data + :type body: Generator + :param mode: Optional. Valid and Required for + "SetAccessControlRecursive" operation. Mode "set" sets POSIX access + control rights on files and directories, "modify" modifies one or more + POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were + present earlier on files and directories. Possible values include: + 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param max_records: Optional. Valid for "SetAccessControlRecursive" + operation. It specifies the maximum number of files or directories on + which the acl change will be applied. 
If omitted or greater than
+         2,000, the request will process up to 2,000 items.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each
+         invocation is limited. If the number of paths to be processed exceeds
+         this limit, a continuation token is returned in the response header
+         x-ms-continuation. When a continuation token is returned in the
+         response, it must be percent-encoded and specified in a subsequent
+         invocation of the setAccessControlRecursive operation.
+        :type continuation: str
+        :param position: This parameter allows the caller to upload data in
+         parallel and control the order in which it is appended to the file.
+         It is required when uploading data to be appended to the file and when
+         flushing previously uploaded data to the file. The value must be the
+         position where the data is to be appended. Uploaded data is not
+         immediately flushed, or written, to the file. To flush, the
+         previously uploaded data must be contiguous, the position parameter
+         must be specified and equal to the length of the file after all data
+         has been written, and there must not be a request entity body included
+         with the request.
+        :type position: long
+        :param retain_uncommitted_data: Valid only for flush operations. If
+         "true", uncommitted data is retained after the flush operation
+         completes; otherwise, the uncommitted data is deleted after the flush
+         operation. The default is false. Data at offsets less than the
+         specified position are written to the file when flush succeeds, but
+         this optional parameter allows data after the flush position to be
+         retained for a future flush operation.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive
+         notifications when files change. When Azure Storage Events are
+         enabled, a file changed event is raised. This event has a property
+         indicating whether this is the final change to distinguish the
+         difference between an intermediate flush to a file stream and the
+         final close of a file stream. The close query parameter is valid only
+         when the action is "flush" and change notifications are enabled. If
+         the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a
+         property indicating that this is the final update (the file stream has
+         been closed). If "false", a change notification is raised indicating
+         the file has changed. The default is false. This query parameter is
+         set to true by the Hadoop ABFS driver to indicate that the file stream
+         has been closed.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".
+         Must be 0 for "Flush Data". Must be the length of the request content
+         in bytes for "Append Data".
+        :type content_length: long
+        :param properties: Optional. User-defined properties to be stored with
+         the filesystem, in the format of a comma-separated list of name and
+         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+         string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties
+         not included in the list will be removed. All properties are removed
+         if the header is omitted. To merge new and existing properties, first
+         get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all
+         properties.
+        :type properties: str
+        :param owner: Optional.
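+
+        (Illustrative append-then-flush sequence, an editorial sketch:
+        ``client`` is as above, and the empty body passed to the flush call
+        is an assumption of this sketch, since flush must carry no request
+        entity body::
+
+            payload = b'hello'
+            await client.path.update('append', iter([payload]), position=0,
+                                     content_length=len(payload))
+            await client.path.update('flush', iter([]), position=len(payload),
+                                     content_length=0)
+
+        )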
The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = 
self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/octet-stream' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} + + async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Lease Path. + + Create and manage a lease to restrict write and delete access to the + path. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param x_ms_lease_action: There are five lease actions: "acquire", + "break", "change", "renew", and "release". Use "acquire" and specify + the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a + new lease. Use "break" to break an existing lease. When a lease is + broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the + file. 
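+
+        (Illustrative acquire call, an editorial sketch with ``client`` as
+        above::
+
+            import uuid
+            await client.path.lease('acquire', x_ms_lease_duration=60,
+                                    proposed_lease_id=str(uuid.uuid4()))
+
+        )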
When a lease is successfully broken, the response indicates the + interval in seconds until a new lease can be acquired. Use "change" + and specify the current lease ID in "x-ms-lease-id" and the new lease + ID in "x-ms-proposed-lease-id" to change the lease ID of an active + lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to + release a lease. Possible values include: 'acquire', 'break', + 'change', 'renew', 'release' + :type x_ms_lease_action: str or + ~azure.storage.filedatalake.models.PathLeaseAction + :param x_ms_lease_duration: The lease duration is required to acquire + a lease, and specifies the duration of the lease in seconds. The + lease duration must be between 15 and 60 seconds or -1 for infinite + lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is + optional to break a lease, and specifies the break period of the + lease in seconds. The lease break duration must be between 0 and 60 + seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') + if 
x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + lease.metadata = {'url': '/{filesystem}/{path}'} + + async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Read File. + + Read the contents of a file. For read operations, range requests are + supported. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param range: The HTTP Range request header specifies one or more byte + ranges of the resource to be retrieved. + :type range: str + :param x_ms_range_get_content_md5: Optional. 
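+
+        (Illustrative ranged read, an editorial sketch with ``client`` as
+        above; the returned value is the streaming body, which this sketch
+        assumes can be iterated asynchronously::
+
+            stream = await client.path.read(range='bytes=0-1023')
+            data = b''.join([chunk async for chunk in stream])
+
+        )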
When this header is set + to "true" and specified together with the Range header, the service + returns the MD5 hash for the range, as long as the range is less than + or equal to 4MB in size. If this header is specified without the Range + header, the service returns status code 400 (Bad Request). If this + header is set to true when the range exceeds 4 MB in size, the service + returns status code 400 (Bad Request). + :type x_ms_range_get_content_md5: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.read.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', 
response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + read.metadata = {'url': '/{filesystem}/{path}'} + + async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Get Properties | Get Status | Get Access Control List. + + Get Properties returns all system and user defined properties for a + path. Get Status returns all system defined properties for a path. Get + Access Control List returns the access control list for a path. This + operation supports conditional HTTP requests. For more information, + see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: Optional. If the value is "getStatus" only the system + defined properties for the path are returned. If the value is + "getAccessControl" the access control list is returned in the response + headers (Hierarchical Namespace must be enabled for the account), + otherwise the properties are returned. Possible values include: + 'getAccessControl', 'getStatus' + :type action: str or + ~azure.storage.filedatalake.models.PathGetPropertiesAction + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
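+
+        (Illustrative ACL lookup, an editorial sketch with ``client`` as
+        above; the ``cls`` callback simply surfaces the deserialized response
+        headers::
+
+            headers = await client.path.get_properties(
+                action='getAccessControl', upn=True,
+                cls=lambda resp, body, hdrs: hdrs)
+            acl = headers['x-ms-acl']
+
+        )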
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}/{path}'} + + async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Delete File | Delete Directory. + + Delete the file or directory. This operation supports conditional HTTP + requests. For more information, see [Specifying Conditional Headers + for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param recursive: Required + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
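+
+        (Illustrative recursive delete that follows continuation tokens, an
+        editorial sketch with ``client`` as above::
+
+            token = None
+            while True:
+                hdrs = await client.path.delete(
+                    recursive=True, continuation=token,
+                    cls=lambda resp, body, h: h)
+                token = hdrs.get('x-ms-continuation')
+                if not token:
+                    break
+
+        )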
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
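+
+        (An illustrative ``acl`` value in the format above, granting the
+        owner full access, the owning group read and execute, and others
+        nothing::
+
+            acl = 'user::rwx,group::r-x,other::---'
+
+        )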
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + async def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs): + """Set the access control list for a path and subpaths. + + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. Possible values include: 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param continuation: Optional. The number of paths processed with each + invocation is limited. If the number of paths to be processed exceeds + this limit, a continuation token is returned in the x-ms-continuation + response header. When a continuation token is returned, it must be + specified in a subsequent invocation of this operation to continue + applying the access control list change. + :type continuation: str + :param max_records: Optional. It specifies the maximum number of files + or directories on which the ACL change will be applied. If omitted or + greater than 2,000, the request will process up to 2,000 items. + :type max_records: int + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
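Because the recursive ACL change is chunked (max_records caps each invocation), callers are expected to chase the x-ms-continuation token. A minimal sketch, assuming `path_ops` is a generated async PathOperations instance and using the `cls` callback, which receives the response, the deserialized body, and the header dict:

async def set_acl_recursive_to_completion(path_ops, acl):
    # Hypothetical helper; drives set_access_control_recursive until the
    # service stops returning an x-ms-continuation token.
    def unpack(response, deserialized, headers):
        return deserialized, headers.get('x-ms-continuation')

    continuation = None
    while True:
        result, continuation = await path_ops.set_access_control_recursive(
            mode="set", acl=acl, continuation=continuation,
            max_records=2000, cls=unpack)
        print("dirs:", result.directories_successful,
              "files:", result.files_successful,
              "failed:", result.failure_count)
        if not continuation:
            break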
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + action = "setAccessControlRecursive" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} + + async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Flush previously uploaded data to a file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations.
+ :type timeout: int + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. + :type position: long + :param retain_uncommitted_data: Valid only for flush operations. If + "true", uncommitted data is retained after the flush operation + completes; otherwise, the uncommitted data is deleted after the flush + operation. The default is false. Data at offsets less than the + specified position are written to the file when flush succeeds, but + this optional parameter allows data after the flush position to be + retained for a future flush operation. + :type retain_uncommitted_data: bool + :param close: Azure Storage Events allow applications to receive + notifications when files change. When Azure Storage Events are + enabled, a file changed event is raised. This event has a property + indicating whether this is the final change to distinguish the + difference between an intermediate flush to a file stream and the + final close of a file stream. The close query parameter is valid only + when the action is "flush" and change notifications are enabled. If + the value of close is "true" and the flush operation completes + successfully, the service raises a file change notification with a + property indicating that this is the final update (the file stream has + been closed). If "false", a change notification is raised indicating + the file has changed. The default is false. This query parameter is + set to true by the Hadoop ABFS driver to indicate that the file stream + has been closed. + :type close: bool + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
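The position/content_length contract above is easiest to see end to end: every append names the offset at which its bytes land, and the final flush passes the total file length with a zero Content-Length and no body. A sketch under those rules, assuming `path_ops` is a generated async PathOperations instance targeting an existing file:

async def write_and_commit(path_ops, chunks):
    # Hypothetical helper: append each bytes chunk at its running offset,
    # then flush once at the final length to commit the uncommitted data.
    offset = 0
    for chunk in chunks:
        await path_ops.append_data(body=iter([chunk]), position=offset,
                                   content_length=len(chunk))
        offset += len(chunk)
    await path_ops.flush_data(position=offset, content_length=0, close=True)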
+ :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "flush" + + # Construct URL + url = self.flush_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if 
cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + flush_data.metadata = {'url': '/{filesystem}/{path}'} + + async def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Append data to the file. + + :param body: Initial data + :type body: Generator + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. 
+ :type position: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + transactional_content_hash = None + if path_http_headers is not None: + transactional_content_hash = path_http_headers.transactional_content_hash + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + action = "append" + + # Construct URL + url = self.append_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + append_data.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py new file mode 100644 index 0000000..b4cb9c5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar resource: The value must be "account" for all account operations. Constant value: "account". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.resource = "account" + + async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs): + """List FileSystems. + + List filesystems and their properties in the given account. + + :param prefix: Filters results to filesystems within the specified + prefix. + :type prefix: str + :param continuation: Optional. The continuation token returned in the + x-ms-continuation header of a previous List operation. When specified, + the listing resumes from the point at which the previous invocation + stopped. + :type continuation: str + :param max_results: An optional value that specifies the maximum + number of items to return. If omitted or greater than 5,000, the + response will include up to 5,000 items. + :type max_results: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations.
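Listing is paged the same way as the recursive ACL change: max_results caps each response and x-ms-continuation links the pages. A sketch of draining every page, assuming `service_ops` is a generated async ServiceOperations instance:

async def all_file_system_names(service_ops, prefix=None):
    # Hypothetical helper: collect every filesystem name across pages.
    def unpack(response, deserialized, headers):
        return deserialized, headers.get('x-ms-continuation')

    names, continuation = [], None
    while True:
        page, continuation = await service_ops.list_file_systems(
            prefix=prefix, continuation=continuation, max_results=5000,
            cls=unpack)
        names.extend(fs.name for fs in (page.filesystems or []))
        if not continuation:
            return names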
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FileSystemList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.FileSystemList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_file_systems.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FileSystemList', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py new file mode 100644 index 0000000..4a3401a --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AclFailedEntry + from ._models_py3 import FileSystem + from ._models_py3 import FileSystemList + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import Path + from ._models_py3 import PathHTTPHeaders + from ._models_py3 import PathList + from ._models_py3 import SetAccessControlRecursiveResponse + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StorageError, StorageErrorException + from ._models_py3 import StorageErrorError +except (SyntaxError, ImportError): + from ._models import AclFailedEntry + from ._models import FileSystem + from ._models import FileSystemList + from ._models import LeaseAccessConditions + from ._models import ModifiedAccessConditions + from ._models import Path + from ._models import PathHTTPHeaders + from ._models import PathList + from ._models import SetAccessControlRecursiveResponse + from ._models import SourceModifiedAccessConditions + from ._models import StorageError, StorageErrorException + from ._models import StorageErrorError +from ._data_lake_storage_client_enums import ( + PathGetPropertiesAction, + PathLeaseAction, + PathRenameMode, + PathResourceType, + PathSetAccessControlRecursiveMode, + PathUpdateAction, +) + +__all__ = [ + 'AclFailedEntry', + 'FileSystem', + 'FileSystemList', + 'LeaseAccessConditions', + 'ModifiedAccessConditions', + 'Path', + 'PathHTTPHeaders', + 'PathList', + 'SetAccessControlRecursiveResponse', + 'SourceModifiedAccessConditions', + 'StorageError', 'StorageErrorException', + 'StorageErrorError', + 'PathResourceType', + 'PathRenameMode', + 'PathUpdateAction', + 'PathSetAccessControlRecursiveMode', + 'PathLeaseAction', + 'PathGetPropertiesAction', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py new file mode 100644 index 0000000..35a1a57 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum + + +class PathResourceType(str, Enum): + + directory = "directory" + file = "file" + + +class PathRenameMode(str, Enum): + + legacy = "legacy" + posix = "posix" + + +class PathUpdateAction(str, Enum): + + append = "append" + flush = "flush" + set_properties = "setProperties" + set_access_control = "setAccessControl" + set_access_control_recursive = "setAccessControlRecursive" + + +class PathSetAccessControlRecursiveMode(str, Enum): + + set = "set" + modify = "modify" + remove = "remove" + + +class PathLeaseAction(str, Enum): + + acquire = "acquire" + break_enum = "break" + change = "change" + renew = "renew" + release = "release" + + +class PathGetPropertiesAction(str, Enum): + + get_access_control = "getAccessControl" + get_status = "getStatus" diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py new file mode 100644 index 0000000..2f44279 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AclFailedEntry(Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AclFailedEntry, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.error_message = kwargs.get('error_message', None) + + +class FileSystem(Model): + """FileSystem. + + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileSystem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + + +class FileSystemList(Model): + """FileSystemList. + + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__(self, **kwargs): + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = kwargs.get('filesystems', None) + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. 
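Since each of these enums derives from (str, Enum), members compare equal to, and serialize as, their wire values; "break" is spelled break_enum only because break is a Python keyword. A small sketch, assuming the package layout in this patch is importable as-is:

from azure.multiapi.storagev2.filedatalake.v2019_12_12._generated.models import (
    PathLeaseAction,
    PathSetAccessControlRecursiveMode,
)

# str-derived enums are interchangeable with their wire strings.
assert PathLeaseAction.break_enum == "break"
assert PathSetAccessControlRecursiveMode.set == "set"
assert PathSetAccessControlRecursiveMode("modify") is PathSetAccessControlRecursiveMode.modify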
+ :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + + +class Path(Model): + """Path. + + :param name: + :type name: str + :param is_directory: Default value: False . + :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Path, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_directory = kwargs.get('is_directory', False) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + self.content_length = kwargs.get('content_length', None) + self.owner = kwargs.get('owner', None) + self.group = kwargs.get('group', None) + self.permissions = kwargs.get('permissions', None) + + +class PathHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: Path_create, + Path_update, Path_flush_data, Path_append_data. + + :param cache_control: Optional. Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If + specified, this property is stored with the blob and returned with a read + request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If + specified, this property is stored with the blob and returned with a read + request. 
+ :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition + header. + :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, + this property is stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be + validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the + body, to be validated by the service. + :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str'}, + 'content_encoding': {'key': '', 'type': 'str'}, + 'content_language': {'key': '', 'type': 'str'}, + 'content_disposition': {'key': '', 'type': 'str'}, + 'content_type': {'key': '', 'type': 'str'}, + 'content_md5': {'key': '', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, + } + + def __init__(self, **kwargs): + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.content_type = kwargs.get('content_type', None) + self.content_md5 = kwargs.get('content_md5', None) + self.transactional_content_hash = kwargs.get('transactional_content_hash', None) + + +class PathList(Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__(self, **kwargs): + super(PathList, self).__init__(**kwargs) + self.paths = kwargs.get('paths', None) + + +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, **kwargs): + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = kwargs.get('directories_successful', None) + self.files_successful = kwargs.get('files_successful', None) + self.failure_count = kwargs.get('failure_count', None) + self.failed_entries = kwargs.get('failed_entries', None) + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for create operation. + + :param source_if_match: Specify an ETag value to operate only on blobs + with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on + blobs without a matching value. + :type source_if_none_match: str + :param source_if_modified_since: Specify this header value to operate only + on a blob if it has been modified since the specified date/time. 
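A SetAccessControlRecursiveResponse separates the success counters from the per-path failures, so a caller can report exactly which entries were skipped. A minimal sketch over the model as defined here:

def summarize_recursive_acl(resp):
    # resp is a SetAccessControlRecursiveResponse instance.
    print("directories ok:", resp.directories_successful,
          "files ok:", resp.files_successful,
          "failures:", resp.failure_count)
    for entry in resp.failed_entries or []:  # list of AclFailedEntry
        print("  %s %s: %s" % (entry.type, entry.name, entry.error_message))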
+ :type source_if_modified_since: datetime + :param source_if_unmodified_since: Specify this header value to operate + only on a blob if it has not been modified since the specified date/time. + :type source_if_unmodified_since: datetime + """ + + _attribute_map = { + 'source_if_match': {'key': '', 'type': 'str'}, + 'source_if_none_match': {'key': '', 'type': 'str'}, + 'source_if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match = kwargs.get('source_if_match', None) + self.source_if_none_match = kwargs.get('source_if_none_match', None) + self.source_if_modified_since = kwargs.get('source_if_modified_since', None) + self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) + + +class StorageError(Model): + """StorageError. + + :param error: The service error response object. + :type error: ~azure.storage.filedatalake.models.StorageErrorError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'StorageErrorError'}, + } + + def __init__(self, **kwargs): + super(StorageError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class StorageErrorException(HttpResponseError): + """Server responded with exception of type: 'StorageError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, response, deserialize, *args): + + model_name = 'StorageError' + self.error = deserialize(model_name, response) + if self.error is None: + self.error = deserialize.dependencies[model_name]() + super(StorageErrorException, self).__init__(response=response) + + +class StorageErrorError(Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(StorageErrorError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py new file mode 100644 index 0000000..3ca8d84 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AclFailedEntry(Model): + """AclFailedEntry.
+ + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, error_message: str=None, **kwargs) -> None: + super(AclFailedEntry, self).__init__(**kwargs) + self.name = name + self.type = type + self.error_message = error_message + + +class FileSystem(Model): + """FileSystem. + + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, last_modified: str=None, e_tag: str=None, **kwargs) -> None: + super(FileSystem, self).__init__(**kwargs) + self.name = name + self.last_modified = last_modified + self.e_tag = e_tag + + +class FileSystemList(Model): + """FileSystemList. + + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__(self, *, filesystems=None, **kwargs) -> None: + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = filesystems + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str'}, + } + + def __init__(self, *, lease_id: str=None, **kwargs) -> None: + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + } + + def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None: + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + + +class Path(Model): + """Path. + + :param name: + :type name: str + :param is_directory: Default value: False . 
+ :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, is_directory: bool=False, last_modified: str=None, e_tag: str=None, content_length: int=None, owner: str=None, group: str=None, permissions: str=None, **kwargs) -> None: + super(Path, self).__init__(**kwargs) + self.name = name + self.is_directory = is_directory + self.last_modified = last_modified + self.e_tag = e_tag + self.content_length = content_length + self.owner = owner + self.group = group + self.permissions = permissions + + +class PathHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: Path_create, + Path_update, Path_flush_data, Path_append_data. + + :param cache_control: Optional. Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If + specified, this property is stored with the blob and returned with a read + request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If + specified, this property is stored with the blob and returned with a read + request. + :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition + header. + :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, + this property is stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be + validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the + body, to be validated by the service. 
+ :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str'}, + 'content_encoding': {'key': '', 'type': 'str'}, + 'content_language': {'key': '', 'type': 'str'}, + 'content_disposition': {'key': '', 'type': 'str'}, + 'content_type': {'key': '', 'type': 'str'}, + 'content_md5': {'key': '', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, + } + + def __init__(self, *, cache_control: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, content_type: str=None, content_md5: bytearray=None, transactional_content_hash: bytearray=None, **kwargs) -> None: + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + self.content_type = content_type + self.content_md5 = content_md5 + self.transactional_content_hash = transactional_content_hash + + +class PathList(Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__(self, *, paths=None, **kwargs) -> None: + super(PathList, self).__init__(**kwargs) + self.paths = paths + + +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, *, directories_successful: int=None, files_successful: int=None, failure_count: int=None, failed_entries=None, **kwargs) -> None: + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = directories_successful + self.files_successful = files_successful + self.failure_count = failure_count + self.failed_entries = failed_entries + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for create operation. + + :param source_if_match: Specify an ETag value to operate only on blobs + with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on + blobs without a matching value. + :type source_if_none_match: str + :param source_if_modified_since: Specify this header value to operate only + on a blob if it has been modified since the specified date/time. + :type source_if_modified_since: datetime + :param source_if_unmodified_since: Specify this header value to operate + only on a blob if it has not been modified since the specified date/time. 
+ :type source_if_unmodified_since: datetime + """ + + _attribute_map = { + 'source_if_match': {'key': '', 'type': 'str'}, + 'source_if_none_match': {'key': '', 'type': 'str'}, + 'source_if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, source_if_match: str=None, source_if_none_match: str=None, source_if_modified_since=None, source_if_unmodified_since=None, **kwargs) -> None: + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + + +class StorageError(Model): + """StorageError. + + :param error: The service error response object. + :type error: ~azure.storage.filedatalake.models.StorageErrorError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'StorageErrorError'}, + } + + def __init__(self, *, error=None, **kwargs) -> None: + super(StorageError, self).__init__(**kwargs) + self.error = error + + +class StorageErrorException(HttpResponseError): + """Server responded with exception of type: 'StorageError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, response, deserialize, *args): + + model_name = 'StorageError' + self.error = deserialize(model_name, response) + if self.error is None: + self.error = deserialize.dependencies[model_name]() + super(StorageErrorException, self).__init__(response=response) + + +class StorageErrorError(Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: + super(StorageErrorError, self).__init__(**kwargs) + self.code = code + self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py new file mode 100644 index 0000000..9efa6df --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
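Every generated operation raises StorageErrorException on a non-success status, with the deserialized StorageError attached as .error; its nested StorageErrorError carries the service's code and message. A hedged handling sketch, with `path_ops` again an assumed async PathOperations instance and "PathNotFound" assumed to be the service code for a missing path:

from azure.multiapi.storagev2.filedatalake.v2019_12_12._generated.models import (
    StorageErrorException,
)

async def delete_if_present(path_ops):
    # Hypothetical helper: swallow a missing-path error, re-raise the rest.
    try:
        await path_ops.delete()
    except StorageErrorException as exc:
        err = exc.error.error  # StorageError.error -> StorageErrorError
        if err is not None and err.code == "PathNotFound":
            return
        raise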
+# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._file_system_operations import FileSystemOperations +from ._path_operations import PathOperations + +__all__ = [ + 'ServiceOperations', + 'FileSystemOperations', + 'PathOperations', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py new file mode 100644 index 0000000..b0d17ff --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py @@ -0,0 +1,462 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class FileSystemOperations(object): + """FileSystemOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + def create(self, properties=None, request_id=None, timeout=None, cls=None, **kwargs): + """Create FileSystem. + + Create a FileSystem rooted at the specified location. If the FileSystem + already exists, the operation fails. This operation does not support + conditional HTTP requests. + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations.
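The x-ms-properties wire format described above (comma-separated name=value pairs with base64-encoded values) is mechanical to produce. A sketch of an encoder matching that description; the helper name is hypothetical:

import base64

def encode_filesystem_properties(props):
    # Build the x-ms-properties header value: "n1=v1, n2=v2, ..." with
    # each value base64-encoded; keys and values must be ISO-8859-1 text.
    return ", ".join(
        "%s=%s" % (name, base64.b64encode(value.encode("iso-8859-1")).decode("ascii"))
        for name, value in props.items())

# e.g. encode_filesystem_properties({"project": "alpha"}) -> 'project=YWxwaGE='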
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}'} + + def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional + HTTP requests. For more information, see [Specifying Conditional + Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. 
+ :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{filesystem}'} + + def get_properties(self, request_id=None, timeout=None, cls=None, **kwargs): + """Get FileSystem Properties. 
+ + All system and user-defined filesystem properties are specified in the + response headers. + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}'} + + def delete(self, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a + FileSystem with the same identifier cannot be created for at least 30 + seconds. While the filesystem is being deleted, attempts to create a + filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information + indicating that the filesystem is being deleted. All other operations, + including operations on any files or directories within the filesystem, + will fail with status code 404 (Not Found) while the filesystem is + being deleted. This operation supports conditional HTTP requests. 
For + more information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}'} + + def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, cls=None, **kwargs): + """List Paths. + + List FileSystem paths and their properties. 
+ + :param recursive: Required. If "true", all paths are listed; + otherwise, only paths at the root of the filesystem (or of the + specified directory) are listed. + :type recursive: bool + :param continuation: Optional. If the number of paths to be listed + exceeds the maxResults limit, a continuation token is returned in the + response header x-ms-continuation. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the list operation to continue listing the paths. + :type continuation: str + :param path: Optional. Filters results to paths within the specified + directory. An error occurs if the directory does not exist. + :type path: str + :param max_results: An optional value that specifies the maximum + number of items to return. If omitted or greater than 5,000, the + response will include up to 5,000 items. + :type max_results: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: PathList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.PathList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_paths.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if path is not None: + query_parameters['directory'] = self._serialize.query("path", path, 'str') + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) +
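# The GET below is dispatched through the azure-core pipeline; a 200 body is deserialized as PathList, and the x-ms-continuation response header carries the token for the next page. +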
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PathList', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_paths.metadata = {'url': '/{filesystem}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py new file mode 100644 index 0000000..58e7d7e --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py @@ -0,0 +1,1599 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class PathOperations(object): + """PathOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """Create File | Create Directory | Rename File | Rename Directory. + + Create or rename a file or directory. By default, the destination is + overwritten, and if the destination already exists and has a lease, the + lease is broken. This operation supports conditional HTTP requests.
+ For more information, see [Specifying Conditional Headers for Blob + Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param resource: Required only for Create File and Create Directory. + The value must be "file" or "directory". Possible values include: + 'directory', 'file' + :type resource: str or + ~azure.storage.filedatalake.models.PathResourceType + :param continuation: Optional. When renaming a directory, the number + of paths that are renamed with each invocation is limited. If the + number of paths to be renamed exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the rename operation to continue renaming the directory. + :type continuation: str + :param mode: Optional. Valid only when namespace is enabled. This + parameter determines the behavior of the rename operation. The value + must be "legacy" or "posix", and the default value will be "posix". + Possible values include: 'legacy', 'posix' + :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode + :param rename_source: An optional file or directory to be renamed. + The value must have the following format: "/{filesystem}/{path}". If + "x-ms-properties" is specified, the properties will overwrite the + existing properties; otherwise, the existing properties will be + preserved. This value must be a URL percent-encoded string. Note that + the string may only contain ASCII characters in the ISO-8859-1 + character set. + :type rename_source: str + :param source_lease_id: A lease ID for the source path. If specified, + the source path must have an active lease and the lease ID must + match. + :type source_lease_id: str + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param umask: Optional and only valid if Hierarchical Namespace is + enabled for the account. When creating a file or directory and the + parent folder does not have a default ACL, the umask restricts the + permissions of the file or directory to be created. The resulting + permission is given by p bitwise and not u, where p is the permission + and u is the umask. For example, if p is 0777 and u is 0057, then the + resulting permission is 0720. The default permission is 0777 for a + directory and 0666 for a file.
The default umask is 0027. The umask + must be specified in 4-digit octal notation (e.g. 0766). + :type umask: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
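# Of the query parameters below, only 'resource' (file vs. directory) applies to a plain create; 'mode' and 'continuation' are meaningful only when rename_source drives a rename. +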
query_parameters = {} + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + + # 
Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}/{path}'} + + def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously + uploaded data to a file, sets properties for a file or directory, or + sets access control for a file or directory. Data can only be appended + to a file. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: The action must be "append" to upload data to be + appended to a file, "flush" to flush previously uploaded data to a + file, "setProperties" to set the properties of a file or directory, + "setAccessControl" to set the owner, group, permissions, or access + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes + permissions for the owner, owning group, and others, so the + x-ms-permissions and x-ms-acl request headers are mutually exclusive. + Possible values include: 'append', 'flush', 'setProperties', + 'setAccessControl', 'setAccessControlRecursive' + :type action: str or + ~azure.storage.filedatalake.models.PathUpdateAction + :param body: Initial data + :type body: Generator + :param mode: Optional. Valid and Required for + "SetAccessControlRecursive" operation. Mode "set" sets POSIX access + control rights on files and directories, "modify" modifies one or more + POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were + present earlier on files and directories. 
Possible values include: + 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param max_records: Optional. Valid for "SetAccessControlRecursive" + operation. It specifies the maximum number of files or directories on + which the acl change will be applied. If omitted or greater than + 2,000, the request will process up to 2,000 items. + :type max_records: int + :param continuation: Optional. The number of paths processed with each + invocation is limited. If the number of paths to be processed exceeds + this limit, a continuation token is returned in the response header + x-ms-continuation. When a continuation token is returned in the + response, it must be percent-encoded and specified in a subsequent + invocation of the setAccessControlRecursive operation. + :type continuation: str + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. + :type position: long + :param retain_uncommitted_data: Valid only for flush operations. If + "true", uncommitted data is retained after the flush operation + completes; otherwise, the uncommitted data is deleted after the flush + operation. The default is false. Data at offsets less than the + specified position are written to the file when flush succeeds, but + this optional parameter allows data after the flush position to be + retained for a future flush operation. + :type retain_uncommitted_data: bool + :param close: Azure Storage Events allow applications to receive + notifications when files change. When Azure Storage Events are + enabled, a file changed event is raised. This event has a property + indicating whether this is the final change to distinguish the + difference between an intermediate flush to a file stream and the + final close of a file stream. The close query parameter is valid only + when the action is "flush" and change notifications are enabled. If + the value of close is "true" and the flush operation completes + successfully, the service raises a file change notification with a + property indicating that this is the final update (the file stream has + been closed). If "false", a change notification is raised indicating + the file has changed. The default is false. This query parameter is + set to true by the Hadoop ABFS driver to indicate that the file stream + has been closed. + :type close: bool + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed.
All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if 
modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/octet-stream' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} + + def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Lease Path. + + Create and manage a lease to restrict write and delete access to the + path. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param x_ms_lease_action: There are five lease actions: "acquire", + "break", "change", "renew", and "release". Use "acquire" and specify + the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a + new lease. 
Use "break" to break an existing lease. When a lease is + broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the + file. When a lease is successfully broken, the response indicates the + interval in seconds until a new lease can be acquired. Use "change" + and specify the current lease ID in "x-ms-lease-id" and the new lease + ID in "x-ms-proposed-lease-id" to change the lease ID of an active + lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to + release a lease. Possible values include: 'acquire', 'break', + 'change', 'renew', 'release' + :type x_ms_lease_action: str or + ~azure.storage.filedatalake.models.PathLeaseAction + :param x_ms_lease_duration: The lease duration is required to acquire + a lease, and specifies the duration of the lease in seconds. The + lease duration must be between 15 and 60 seconds or -1 for infinite + lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is + optional to break a lease, and specifies the break period of the + lease in seconds. The lease break duration must be between 0 and 60 + seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') + if x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + 
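# A lease acquire returns 201 and a break returns 202, while renew, change, and release return 200; any other status is mapped to StorageErrorException below. +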
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + lease.metadata = {'url': '/{filesystem}/{path}'} + + def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Read File. + + Read the contents of a file. For read operations, range requests are + supported. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param range: The HTTP Range request header specifies one or more byte + ranges of the resource to be retrieved. + :type range: str + :param x_ms_range_get_content_md5: Optional. When this header is set + to "true" and specified together with the Range header, the service + returns the MD5 hash for the range, as long as the range is less than + or equal to 4MB in size. If this header is specified without the Range + header, the service returns status code 400 (Bad Request). If this + header is set to true when the range exceeds 4 MB in size, the service + returns status code 400 (Bad Request). + :type x_ms_range_get_content_md5: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.read.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + 
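+            # stream_download returns a lazy generator of byte chunks driven
+            # by the pipeline transport; the body is not buffered into memory
+            # here. (Editorial sketch of assumed caller-side consumption,
+            # with a hypothetical ``path_operations`` instance:
+            #     downloader = path_operations.read(range='bytes=0-1023')
+            #     data = b''.join(downloader))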
header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', 
response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + read.metadata = {'url': '/{filesystem}/{path}'} + + def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Get Properties | Get Status | Get Access Control List. + + Get Properties returns all system and user defined properties for a + path. Get Status returns all system defined properties for a path. Get + Access Control List returns the access control list for a path. This + operation supports conditional HTTP requests. For more information, + see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: Optional. If the value is "getStatus" only the system + defined properties for the path are returned. If the value is + "getAccessControl" the access control list is returned in the response + headers (Hierarchical Namespace must be enabled for the account), + otherwise the properties are returned. Possible values include: + 'getAccessControl', 'getStatus' + :type action: str or + ~azure.storage.filedatalake.models.PathGetPropertiesAction + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')),
+                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
+                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
+                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
+                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    get_properties.metadata = {'url': '/{filesystem}/{path}'}
+
+    def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP
+        requests. For more information, see [Specifying Conditional Headers
+        for Blob Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+        :param recursive: Required and valid only when the resource is a
+         directory. If "true", all paths beneath the directory will be
+         deleted. If "false" and the directory is non-empty, an error occurs.
+        :type recursive: bool
+        :param continuation: Optional. When deleting a directory, the number
+         of paths that are deleted with each invocation is limited. If the
+         number of paths to be deleted exceeds this limit, a continuation token
+         is returned in this response header. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the delete operation to continue deleting the directory.
+        :type continuation: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) 
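+            # (Editorial sketch, not generated code: an assumed successful
+            # call through this layer, using hypothetical names. Typically
+            # either a POSIX permissions string or an ACL is supplied:
+            #     path_operations.set_access_control(
+            #         owner=owner_object_id, permissions='rwxr-x---')
+            # or:
+            #     path_operations.set_access_control(
+            #         acl='user::rwx,group::r-x,other::---'))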
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+            }
+            return cls(response, None, response_headers)
+    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
+
+    def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, cls=None, **kwargs):
+        """Set the access control list for a path and subpaths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and
+         directories, "modify" modifies one or more POSIX access control rights
+         that pre-exist on files and directories, "remove" removes one or more
+         POSIX access control rights that were present earlier on files and
+         directories. Possible values include: 'set', 'modify', 'remove'
+        :type mode: str or
+         ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param continuation: Optional. The number of paths processed with each
+         invocation is limited. If the number of paths to be processed exceeds
+         this limit, a continuation token is returned in the response header
+         x-ms-continuation. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the
+         setAccessControlRecursive operation to continue processing the
+         directory.
+        :type continuation: str
+        :param max_records: Optional. It specifies the maximum number of files
+         or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories.
+         The value is a comma-separated list of access control entries. Each
+         access control entry (ACE) consists of a scope, a type, a user or
+         group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: SetAccessControlRecursiveResponse or the result of
+         cls(response)
+        :rtype:
+         ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        action = "setAccessControlRecursive"
+
+        # Construct URL
+        url = self.set_access_control_recursive.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if continuation is not None:
+            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+        query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode')
+        if max_records is not None:
+            query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+        query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if acl is not None:
+            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.patch(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('SetAccessControlRecursiveResponse', response)
+            header_dict = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'}
+
+    def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+        """Flush previously uploaded data to a file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in
+         parallel and control the order in which it is appended to the file.
+         It is required when uploading data to be appended to the file and when
+         flushing previously uploaded data to the file. The value must be the
+         position where the data is to be appended. Uploaded data is not
+         immediately flushed, or written, to the file. To flush, the
+         previously uploaded data must be contiguous, the position parameter
+         must be specified and equal to the length of the file after all data
+         has been written, and there must not be a request entity body included
+         with the request.
+        :type position: long
+        :param retain_uncommitted_data: Valid only for flush operations. If
+         "true", uncommitted data is retained after the flush operation
+         completes; otherwise, the uncommitted data is deleted after the flush
+         operation. The default is false. Data at offsets less than the
+         specified position are written to the file when flush succeeds, but
+         this optional parameter allows data after the flush position to be
+         retained for a future flush operation.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive
+         notifications when files change. When Azure Storage Events are
+         enabled, a file changed event is raised. This event has a property
+         indicating whether this is the final change to distinguish the
+         difference between an intermediate flush to a file stream and the
+         final close of a file stream. The close query parameter is valid only
+         when the action is "flush" and change notifications are enabled. If
+         the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a
+         property indicating that this is the final update (the file stream has
+         been closed). If "false" a change notification is raised indicating
+         the file has changed. The default is false. This query parameter is
+         set to true by the Hadoop ABFS driver to indicate that the file stream
+         has been closed.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".
+         Must be 0 for "Flush Data". Must be the length of the request content
+         in bytes for "Append Data".
+        :type content_length: long
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "flush" + + # Construct URL + url = self.flush_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if 
cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + flush_data.metadata = {'url': '/{filesystem}/{path}'} + + def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): + """Append data to the file. + + :param body: Initial data + :type body: Generator + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. + :type position: long + :param timeout: The timeout parameter is expressed in seconds. 
For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + transactional_content_hash = None + if path_http_headers is not None: + transactional_content_hash = path_http_headers.transactional_content_hash + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + action = "append" + + # Construct URL + url = self.append_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': 
self._deserialize('str', response.headers.get('x-ms-version')),
+            }
+            return cls(response, None, response_headers)
+    append_data.metadata = {'url': '/{filesystem}/{path}'}
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py
new file mode 100644
index 0000000..540079a
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py
@@ -0,0 +1,128 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar resource: The value must be "account" for all account operations. Constant value: "account".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.resource = "account"
+
+    def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, cls=None, **kwargs):
+        """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+        :param prefix: Filters results to filesystems within the specified
+         prefix.
+        :type prefix: str
+        :param continuation: Optional. When the number of filesystems to be
+         listed exceeds the maximum allowed, a continuation token is returned
+         in the response header x-ms-continuation. When a continuation token
+         is returned in the response, it must be specified in a subsequent
+         invocation of the list operation to continue listing the filesystems.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum
+         number of items to return. If omitted or greater than 5,000, the
+         response will include up to 5,000 items.
+        :type max_results: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FileSystemList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.FileSystemList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_file_systems.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FileSystemList', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py new file mode 100644 index 0000000..be04589 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+VERSION = "2019-12-12"
+
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py
new file mode 100644
index 0000000..b208508
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py
@@ -0,0 +1,648 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from enum import Enum
+
+from azure.core.paging import PageIterator
+from azure.multiapi.storagev2.blob.v2019_12_12 import LeaseProperties as BlobLeaseProperties
+from azure.multiapi.storagev2.blob.v2019_12_12 import AccountSasPermissions as BlobAccountSasPermissions
+from azure.multiapi.storagev2.blob.v2019_12_12 import ResourceTypes as BlobResourceTypes
+from azure.multiapi.storagev2.blob.v2019_12_12 import UserDelegationKey as BlobUserDelegationKey
+from azure.multiapi.storagev2.blob.v2019_12_12 import ContentSettings as BlobContentSettings
+from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerSasPermissions, BlobSasPermissions
+from azure.multiapi.storagev2.blob.v2019_12_12 import AccessPolicy as BlobAccessPolicy
+from azure.multiapi.storagev2.blob.v2019_12_12 import DelimitedTextDialect as BlobDelimitedTextDialect
+from azure.multiapi.storagev2.blob.v2019_12_12 import DelimitedJsonDialect as BlobDelimitedJSON
+from azure.multiapi.storagev2.blob.v2019_12_12._generated.models import StorageErrorException
+from azure.multiapi.storagev2.blob.v2019_12_12._models import ContainerPropertiesPaged
+from ._deserialize import return_headers_and_deserialized_path_list
+from ._generated.models import Path
+from ._shared.models import DictMixin
+from ._shared.response_handlers import process_storage_error
+
+
+class FileSystemProperties(object):
+    """File System properties class.
+
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file system was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file system.
+    :ivar str public_access: Specifies whether data in the file system may be accessed
+        publicly and the level of access.
+    :ivar bool has_immutability_policy:
+        Represents whether the file system has an immutability policy.
+    :ivar bool has_legal_hold:
+        Represents whether the file system has a legal hold.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        file system as metadata.
+
+    Returned ``FileSystemProperties`` instances expose these values as
+    instance attributes, for example ``file_system_props.last_modified``;
+    the file system name is available as ``file_system_props.name``.
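+
+    Example (editorial sketch; assumes an authenticated
+    ``DataLakeServiceClient`` named ``service_client``)::
+
+        for fs in service_client.list_file_systems():
+            print(fs.name, fs.last_modified)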
+ """ + def __init__(self): + self.name = None + self.last_modified = None + self.etag = None + self.lease = None + self.public_access = None + self.has_immutability_policy = None + self.has_legal_hold = None + self.metadata = None + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + generated.properties.public_access) + props.has_immutability_policy = generated.properties.has_immutability_policy + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + return props + + @classmethod + def _convert_from_container_props(cls, container_properties): + container_properties.__class__ = cls + container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + container_properties.public_access) + container_properties.lease.__class__ = LeaseProperties + return container_properties + + +class FileSystemPropertiesPaged(ContainerPropertiesPaged): + """An Iterable of File System properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file system name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only file systems whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of file system names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + + def __init__(self, *args, **kwargs): + super(FileSystemPropertiesPaged, self).__init__( + *args, + **kwargs + ) + + @staticmethod + def _build_item(item): + return FileSystemProperties._from_generated(item) # pylint: disable=protected-access + + +class DirectoryProperties(DictMixin): + """ + :ivar str name: name of the directory + :ivar str etag: The ETag contains a value that you can use to perform operations + conditionally. + :ivar bool deleted: if the current directory marked as deleted + :ivar dict metadata: Name-value pairs associated with the directory as metadata. + :ivar ~azure.storage.filedatalake.LeaseProperties lease: + Stores all the lease information for the directory. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the directory was modified. + :ivar ~datetime.datetime creation_time: + Indicates when the directory was created, in UTC. + :ivar int remaining_retention_days: The number of days that the directory will be retained + before being permanently deleted by the service. 
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+    """
+    def __init__(self, **kwargs):
+        super(DirectoryProperties, self).__init__(
+            **kwargs
+        )
+        self.name = None
+        self.etag = None
+        self.deleted = None
+        self.metadata = None
+        self.lease = None
+        self.last_modified = None
+        self.creation_time = None
+        self.deleted_time = None
+        self.remaining_retention_days = None
+
+    @classmethod
+    def _from_blob_properties(cls, blob_properties):
+        directory_props = DirectoryProperties()
+        directory_props.name = blob_properties.name
+        directory_props.etag = blob_properties.etag
+        directory_props.deleted = blob_properties.deleted
+        directory_props.metadata = blob_properties.metadata
+        directory_props.lease = blob_properties.lease
+        directory_props.lease.__class__ = LeaseProperties
+        directory_props.last_modified = blob_properties.last_modified
+        directory_props.creation_time = blob_properties.creation_time
+        directory_props.deleted_time = blob_properties.deleted_time
+        directory_props.remaining_retention_days = blob_properties.remaining_retention_days
+        return directory_props
+
+
+class FileProperties(DictMixin):
+    """
+    :ivar str name: name of the file
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: whether the current file is marked as deleted
+    :ivar dict metadata: Name-value pairs associated with the file as metadata.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the file was created, in UTC.
+    :ivar int size: size of the file
+    :ivar int remaining_retention_days: The number of days that the file will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+    """
+    def __init__(self, **kwargs):
+        super(FileProperties, self).__init__(
+            **kwargs
+        )
+        self.name = None
+        self.etag = None
+        self.deleted = None
+        self.metadata = None
+        self.lease = None
+        self.last_modified = None
+        self.creation_time = None
+        self.size = None
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.content_settings = None
+
+    @classmethod
+    def _from_blob_properties(cls, blob_properties):
+        file_props = FileProperties()
+        file_props.name = blob_properties.name
+        file_props.etag = blob_properties.etag
+        file_props.deleted = blob_properties.deleted
+        file_props.metadata = blob_properties.metadata
+        file_props.lease = blob_properties.lease
+        file_props.lease.__class__ = LeaseProperties
+        file_props.last_modified = blob_properties.last_modified
+        file_props.creation_time = blob_properties.creation_time
+        file_props.size = blob_properties.size
+        file_props.deleted_time = blob_properties.deleted_time
+        file_props.remaining_retention_days = blob_properties.remaining_retention_days
+        file_props.content_settings = blob_properties.content_settings
+        return file_props
+
+
+class PathProperties(object):
+    """Path properties listed by the get_paths API.
+
+    :ivar str name: the full path for a file or directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: Sets POSIX access permissions for the file
+        owner, the file owning group, and others. Each class may be granted
+        read, write, or execute permission. The sticky bit is also supported.
+        Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+        supported.
+    :ivar ~datetime.datetime last_modified: A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: is the path a directory or not.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: the size of the file if the path is a file.
+    """
+    def __init__(self, **kwargs):
+        super(PathProperties, self).__init__(
+            **kwargs
+        )
+        self.name = kwargs.pop('name', None)
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.last_modified = kwargs.get('last_modified', None)
+        self.is_directory = kwargs.get('is_directory', False)
+        self.etag = kwargs.get('etag', None)
+        self.content_length = kwargs.get('content_length', None)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        path_prop = PathProperties()
+        path_prop.name = generated.name
+        path_prop.owner = generated.owner
+        path_prop.group = generated.group
+        path_prop.permissions = generated.permissions
+        path_prop.last_modified = generated.last_modified
+        path_prop.is_directory = bool(generated.is_directory)
+        path_prop.etag = generated.additional_properties.get('etag')
+        path_prop.content_length = generated.content_length
+        return path_prop
+
+
+class PathPropertiesPaged(PageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(
+            self, command,
+            recursive,
+            path=None,
+            max_results=None,
+            continuation_token=None,
+            upn=None):
+        super(PathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.recursive = recursive
+        self.results_per_page = max_results
+        self.path = path
+        self.upn = upn
+        self.current_page = None
+        self.path_list = None
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                self.recursive,
+                continuation=continuation_token or None,
+                path=self.path,
+                max_results=self.results_per_page,
+                upn=self.upn,
+                cls=return_headers_and_deserialized_path_list)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.path_list, self._response = get_next_return
+        self.current_page = [self._build_item(item) for item in self.path_list]
+
+        return self._response['continuation'] or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, PathProperties):
+            return item
+        if isinstance(item, Path):
+            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
+            return path
+        return item
+
+
+class LeaseProperties(BlobLeaseProperties):
+    """DataLake Lease Properties.
+
+    :ivar str status:
+        The lease status of the file.
Possible values: locked|unlocked + :ivar str state: + Lease state of the file. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file is leased, specifies whether the lease is of infinite or fixed duration. + """ + def __init__(self): + self.status = None + self.state = None + self.duration = None + + +class ContentSettings(BlobContentSettings): + """The content settings of a file or directory. + + :ivar str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :ivar str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :ivar str content_language: + If the content_language has previously been set + for the file, that value is stored. + :ivar str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :ivar str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :ivar str content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. + :keyword str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :keyword str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :keyword str content_language: + If the content_language has previously been set + for the file, that value is stored. + :keyword str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :keyword str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :keyword str content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. + """ + def __init__( + self, **kwargs): + super(ContentSettings, self).__init__( + **kwargs + ) + + +class AccountSasPermissions(BlobAccountSasPermissions): + def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin + create=False): + super(AccountSasPermissions, self).__init__( + read=read, create=create, write=write, list=list, + delete=delete + ) + + +class FileSystemSasPermissions(ContainerSasPermissions): + """FileSystemSasPermissions class to be used with the + :func:`~azure.storage.filedatalake.generate_file_system_sas` function. + + :param bool read: + Read the content, properties, metadata etc. + :param bool write: + Create or write content, properties, metadata. Lease the file system. + :param bool delete: + Delete the file system. + :param bool list: + List paths in the file system. 
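+
+    A minimal sketch (``account_name`` and ``account_key`` are assumed
+    placeholders; the exact ``generate_file_system_sas`` signature should be
+    verified against this package):
+
+    .. code-block:: python
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import generate_file_system_sas, FileSystemSasPermissions
+
+        sas_token = generate_file_system_sas(
+            account_name, "myfilesystem", account_key,
+            permission=FileSystemSasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1))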
+    """
+    def __init__(self, read=False, write=False, delete=False, list=False  # pylint: disable=redefined-builtin
+                 ):
+        super(FileSystemSasPermissions, self).__init__(
+            read=read, write=write, delete=delete, list=list
+        )
+
+
+class DirectorySasPermissions(BlobSasPermissions):
+    """DirectorySasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool create:
+        Create a new directory.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the directory.
+    :param bool delete:
+        Delete the directory.
+    """
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False):
+        super(DirectorySasPermissions, self).__init__(
+            read=read, create=create, write=write,
+            delete=delete
+        )
+
+
+class FileSasPermissions(BlobSasPermissions):
+    """FileSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc. Use the file as
+        the source of a read operation.
+    :param bool create:
+        Write a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file.
+    :param bool delete:
+        Delete the file.
+    """
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False):
+        super(FileSasPermissions, self).__init__(
+            read=read, create=create, write=write,
+            delete=delete
+        )
+
+
+class AccessPolicy(BlobAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    """
+    def __init__(self, permission=None, expiry=None, **kwargs):
+        super(AccessPolicy, self).__init__(
+            permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+        )
+
+
+class ResourceTypes(BlobResourceTypes):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g. List File Systems)
+    :param bool file_system:
+        Access to file_system-level APIs (e.g., Create/Delete file system,
+        List Directories/Files)
+    :param bool object:
+        Access to object-level APIs for
+        files (e.g. Create File, etc.)
+    """
+    def __init__(self, service=False, file_system=False, object=False  # pylint: disable=redefined-builtin
+                 ):
+        super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    @classmethod
+    def _from_generated(cls, generated):
+        delegation_key = cls()
+        delegation_key.signed_oid = generated.signed_oid
+        delegation_key.signed_tid = generated.signed_tid
+        delegation_key.signed_start = generated.signed_start
+        delegation_key.signed_expiry = generated.signed_expiry
+        delegation_key.signed_service = generated.signed_service
+        delegation_key.signed_version = generated.signed_version
+        delegation_key.value = generated.value
+        return delegation_key
+
+
+class PublicAccess(str, Enum):
+    """
+    Specifies whether data in the file system may be accessed publicly and the level of access.
+    """
+
+    File = 'blob'
+    """
+    Specifies public read access for files. File data within this file system can be read
+    via anonymous request, but file system data is not available. Clients cannot enumerate
+    files within the file system via anonymous request.
+    """
+
+    FileSystem = 'container'
+    """
+    Specifies full public read access for file system and file data. Clients can enumerate
+    files within the file system via anonymous request, but cannot enumerate file systems
+    within the storage account.
+    """
+
+    @classmethod
+    def _from_generated(cls, public_access):
+        if public_access == "blob":  # pylint:disable=no-else-return
+            return cls.File
+        elif public_access == "container":
+            return cls.FileSystem
+
+        return None
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to.
This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+    """Defines the input or output JSON serialization for a datalake query.
+
+    :keyword str delimiter: The line separator character, default value is '\n'
+    """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+    """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+    :keyword str delimiter:
+        Column separator, defaults to ','.
+    :keyword str quotechar:
+        Field quote, defaults to '"'.
+    :keyword str lineterminator:
+        Record separator, defaults to '\n'.
+    :keyword str escapechar:
+        Escape char, defaults to empty.
+    :keyword bool has_header:
+        Whether the blob data includes headers in the first line. The default value is False, meaning that the
+        data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+        of the first line.
+    """
+
+
+class DataLakeFileQueryError(object):
+    """Represents an error that occurred during a quick query operation.
+
+    :ivar str error:
+        The name of the error.
+    :ivar bool is_fatal:
+        If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing.
+    :ivar str description:
+        A description of the error.
+    :ivar int position:
+        The blob offset at which the error occurred.
+    """
+    def __init__(self, error=None, is_fatal=False, description=None, position=None):
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py
new file mode 100644
index 0000000..d57c903
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py
@@ -0,0 +1,649 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+try:
+    from urllib.parse import urlparse, quote, unquote
+except ImportError:
+    from urlparse import urlparse  # type: ignore
+    from urllib2 import quote, unquote  # type: ignore
+
+import six
+
+from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient
+from ._shared.base_client import StorageAccountHostsMixin, parse_query
+from ._shared.response_handlers import return_response_headers
+from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \
+    get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions
+from ._models import LocationMode, DirectoryProperties
+from ._generated import DataLakeStorageClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated.models import StorageErrorException
+from ._deserialize import process_storage_error
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+    'The require_encryption flag is set, but encryption is not supported'
+    ' for this method.')
+
+
+class PathClient(StorageAccountHostsMixin):
+    def __init__(
+            self, account_url,  # type: str
+            file_system_name,  # type: str
+            path_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+
+        # remove the preceding/trailing delimiter from the path components
+        file_system_name = file_system_name.strip('/')
+
+        # the name of the root directory is /
+        if path_name != '/':
+            path_name = path_name.strip('/')
+
+        if not (file_system_name and path_name):
+            raise ValueError("Please specify a file system name and file path.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
+                                       credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self.path_name = path_name
+
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                         _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline)
+
+    def __exit__(self, *args):
+        self._blob_client.close()
+        super(PathClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """This method closes the sockets opened by the client.
+        It need not be used when using the client as a context manager.
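+
+        A minimal sketch (``file_client`` is an assumed placeholder for any
+        client derived from ``PathClient``, e.g. a ``DataLakeFileClient``):
+
+        .. code-block:: python
+
+            # Preferred: use the client as a context manager so sockets
+            # are closed automatically on exit
+            with file_client:
+                file_client.get_access_control()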
+ """ + self._blob_client.close() + self.__exit__() + + def _format_url(self, hostname): + file_system_name = self.file_system_name + if isinstance(file_system_name, six.text_type): + file_system_name = file_system_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(file_system_name), + quote(self.path_name, safe='~'), + self._query_str) + + def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + path_http_headers = None + if content_settings: + path_http_headers = get_path_http_headers(content_settings) + + options = { + 'resource': resource_type, + 'properties': add_metadata_headers(metadata), + 'permissions': kwargs.pop('permissions', None), + 'umask': kwargs.pop('umask', None), + 'path_http_headers': path_http_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create directory or file + + :param resource_type: + Required for Create File and Create Directory. + The value must be "file" or "directory". Possible values include: + 'directory', 'file' + :type resource_type: str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file/directory as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Dict[str, Union[str, datetime]]
+        """
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _delete_path_options(**kwargs):
+        # type: (**Any) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'recursive': True,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    def _delete(self, **kwargs):
+        # type: (**Any) -> None
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        options = self._delete_path_options(**kwargs)
+        try:
+            return self._client.path.delete(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'owner': owner,
+            'group': group,
+            'permissions': permissions,
+            'acl': acl,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def set_access_control(self, owner=None,  # type: Optional[str]
+                           group=None,  # type: Optional[str]
+                           permissions=None,  # type: Optional[str]
+                           acl=None,  # type: Optional[str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict (Etag and last modified).
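+
+        A minimal sketch (``directory_client`` is an assumed placeholder for a
+        path-level client such as a ``DataLakeDirectoryClient``):
+
+        .. code-block:: python
+
+            # permissions and acl are mutually exclusive; pass one or the other
+            directory_client.set_access_control(permissions='rwxr-x---')
+            directory_client.set_access_control(acl='user::rwx,group::r-x,other::---')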
+        """
+        if not any([owner, group, permissions, acl]):
+            raise ValueError("At least one parameter should be set for set_access_control API")
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions,
+                                                   acl=acl, **kwargs)
+        try:
+            return self._client.path.set_access_control(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _get_access_control_options(upn=None,  # type: Optional[bool]
+                                    **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'action': 'getAccessControl',
+            'upn': upn if upn else False,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def get_access_control(self, upn=None,  # type: Optional[bool]
+                           **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, and access control list for a path.
+
+        :param upn: Optional.
+            Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict.
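+
+        A minimal sketch (``file_client`` is an assumed placeholder; the key
+        names shown are assumed to mirror the x-ms-owner, x-ms-permissions and
+        x-ms-acl response headers described above):
+
+        .. code-block:: python
+
+            access = file_client.get_access_control()
+            print(access['owner'], access['permissions'], access['acl'])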
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return self._client.path.get_properties(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs):
+        # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None):
+            raise ValueError("metadata, permissions and umask are not supported for this operation")
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+        source_mod_conditions = get_source_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        options = {
+            'rename_source': quote(unquote(rename_source)),
+            'path_http_headers': path_http_headers,
+            'lease_access_conditions': access_conditions,
+            'source_lease_id': source_lease_id,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'mode': 'legacy',
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def _rename_path(self, rename_source,
+                     **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Rename a directory or file.
+
+        :param rename_source:
+            The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._rename_path_options( + rename_source, + **kwargs) + try: + return self._client.path.create(**options) + except StorageErrorException as error: + process_storage_error(error) + + def _get_path_properties(self, **kwargs): + # type: (**Any) -> Union[FileProperties, DirectoryProperties] + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file or directory. It does not return the content of the directory or file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: DirectoryProperties or FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a file/directory. + """ + path_properties = self._blob_client.get_blob_properties(**kwargs) + path_properties.__class__ = DirectoryProperties + return path_properties + + def set_metadata(self, metadata, # type: Dict[str, str] + **kwargs): + # type: (...) 
-> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file or directory. Each call to this operation replaces all existing metadata
+        attached to the path. To remove all metadata from the path,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file or directory as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: path-updated property dict (Etag and last modified).
+        """
+        return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+    def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                         **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*).
Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                      lease_id=None,  # type: Optional[str]
+                      **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py
new file mode 100644
index 0000000..ff67d27
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py
@@ -0,0 +1,71 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO  # pylint: disable=unused-import
+
+
+class DataLakeFileQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the file being queried.
+    :ivar str file_system:
+        The name of the file system that the file belongs to.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+        self,
+        blob_query_reader
+    ):
+        self.name = blob_query_reader.name
+        self.file_system = blob_query_reader.container
+        self.response_headers = blob_query_reader.response_headers
+        self.record_delimiter = blob_query_reader.record_delimiter
+        self._bytes_processed = 0
+        self._blob_query_reader = blob_query_reader
+
+    def __len__(self):
+        return len(self._blob_query_reader)
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Union[bytes, str]
+        """
+        return self._blob_query_reader.readall()
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        self._blob_query_reader.readinto(stream)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        return self._blob_query_reader.records()
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py
new file mode 100644
index 0000000..0de3e85
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py
@@ -0,0 +1,81 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +from azure.multiapi.storagev2.blob.v2019_12_12._serialize import _get_match_headers # pylint: disable=protected-access +from ._shared import encode_base64 +from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ + SourceModifiedAccessConditions, LeaseAccessConditions + + +def convert_dfs_url_to_blob_url(dfs_account_url): + return dfs_account_url.replace('.dfs.', '.blob.', 1) + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> str + headers = list() + if metadata: + for key, value in metadata.items(): + headers.append(key + '=') + headers.append(encode_base64(value)) + headers.append(',') + + if headers: + del headers[-1] + + return ''.join(headers) + + +def get_mod_conditions(kwargs): + # type: (Dict[str, Any]) -> ModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None) + ) + + +def get_source_mod_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) + ) + + +def get_path_http_headers(content_settings): + path_headers = PathHTTPHeaders( + cache_control=content_settings.cache_control, + content_type=content_settings.content_type, + content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + content_encoding=content_settings.content_encoding, + content_language=content_settings.content_language, + content_disposition=content_settings.content_disposition + ) + return path_headers + + +def get_access_conditions(lease): + # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def get_lease_id(lease): + if not lease: + return "" + try: + lease_id = lease.id + except AttributeError: + lease_id = lease + return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py new file mode 100644 index 0000000..b11dc57 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py @@ -0,0 +1,140 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
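+
+    A minimal handling sketch (``file_client`` is an assumed placeholder):
+
+    .. code-block:: python
+
+        try:
+            file_client.get_access_control()
+        except AzureSigningError:
+            # Usually caused by an invalid or malformed account key
+            raise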
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py new file mode 100644 index 0000000..14deea6 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py @@ -0,0 +1,437 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, + Optional, + Any, + Iterable, + Dict, + List, + Type, + Tuple, + TYPE_CHECKING, +) +import logging + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, + "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, + "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError("Invalid service: {}".format(service)) + service_name = service.split('-')[0] + account = parsed_url.netloc.split(".{}.core.".format(service_name)) + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = "{}-secondary.{}.{}".format( + self.credential.account_name, service_name, SERVICE_HOST_BASE) + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self.require_encryption = kwargs.get("require_encryption", False) + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(self.snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(self.snapshot)
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self, *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
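+
+        A minimal calling sketch (editor's illustration; ``reqs`` is assumed to
+        be a list of unsent sub-request ``HttpRequest`` objects, which the
+        public batch APIs such as ``delete_blobs`` normally assemble for you)::
+
+            parts = client._batch_send(*reqs, raise_on_any_failure=False)
+            for part in parts:
+                print(part.status_code)  # status of each sub-request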
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='https://{}/?comp=batch'.format(self.primary_hostname), + headers={ + 'x-ms-version': self.api_version + } + ) + + request.set_multipart_mixed( + *reqs, + policies=[ + StorageHeadersPolicy(), + self._credential_policy + ], + enforce_https=False + ) + + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except StorageErrorException as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict(conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} + except KeyError: + credential = conn_settings.get("SharedAccessSignature") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DefaultEndpointsProtocol"], + conn_settings["AccountName"], + service, + conn_settings["EndpointSuffix"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py new file mode 100644 index 0000000..d252ad0 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py @@ -0,0 +1,179 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.")
+            config.transport = AioHttpTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),  # type: ignore
+            config.retry_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, AsyncPipeline(config.transport, policies=policies)
+
+    async def _batch_send(
+        self, *reqs: 'HttpRequest',
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url='https://{}/?comp=batch'.format(self.primary_hostname),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=[
+                StorageHeadersPolicy(),
+                self._credential_policy
+            ],
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Return an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
+        pass
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py
new file mode 100644
index 0000000..7fb05b5
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py
@@ -0,0 +1,26 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import sys +from .._generated.version import VERSION + + +X_MS_VERSION = VERSION + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 2000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. 
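+
+        On the service this maps onto the ``WrappedContentKey`` JSON object
+        assembled by ``_generate_encryption_data_dict`` below; an illustrative
+        shape (editor's sketch, placeholder values) is::
+
+            {"KeyId": "kek-1", "EncryptedKey": "<base64>", "Algorithm": "A256KW"}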
+ ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
+ :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. + :rtype: bytes[] + ''' + + _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) + _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) + + if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: + raise ValueError('Encryption version is not supported.') + + content_encryption_key = None + + # If the resolver exists, give priority to the key it finds. + if key_resolver is not None: + key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) + + _validate_not_none('key_encryption_key', key_encryption_key) + if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): + raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') + # Will throw an exception if the specified algorithm is not supported. 
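+    # Editor's sketch (not part of the patch): a minimal key-encryption-key
+    # object satisfying this interface, built on hypothetical aes_key_wrap /
+    # aes_key_unwrap helpers, could look like:
+    #
+    #     class KeyWrapper:
+    #         def get_kid(self): return 'local:kek-1'
+    #         def wrap_key(self, key): return aes_key_wrap(kek_bytes, key)
+    #         def unwrap_key(self, key, algorithm): return aes_key_unwrap(kek_bytes, key)
+    #         def get_key_wrap_algorithm(self): return 'A256KW'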
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
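+    # Editor's note: after PKCS7(128) padding the payload is always a whole
+    # number of 16-byte AES blocks (e.g. 20 bytes of input pads to 32), so the
+    # update()/finalize() pair below never sees a partial block.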
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param bytes key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. '
 + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    # Build the dictionary structure.
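+    # Editor's note: once run through dumps(), the envelope below looks like
+    # (illustrative values):
+    #   {"EncryptedMessageContents": "<base64 ciphertext>",
+    #    "EncryptionData": {"WrappedContentKey": {...}, "EncryptionAgent": {...},
+    #                       "ContentEncryptionIV": "<base64 iv>",
+    #                       "KeyWrappingMetadata": {"EncryptionLibrary": "Python ..."}}}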
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py
new file mode 100644
index 0000000..6919763
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes + +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.filedatalake.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` class to be used with generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
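+
+    Example (editor's sketch): ``AccountSasPermissions.from_string("rwdl")``
+    sets ``read``, ``write``, ``delete`` and ``list``; ``str()`` of the result
+    reassembles the letters in the canonical order used by ``__str__``.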
+    """
+    def __init__(self, read=False, write=False, delete=False,
+                 list=False,  # pylint: disable=redefined-builtin
+                 add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.filedatalake.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags)
+        parsed._str = permission  # pylint: disable = protected-access
+        return parsed
+
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :param bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`
+    :param bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`
+    :param bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`
+    """
+
+    def __init__(self, blob=False, queue=False, fileshare=False):
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                     ('q' if self.queue else '') +
+                     ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or file you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.filedatalake.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(res_blob, res_queue, res_file)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon on the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
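Summarizing the branch logic being implemented here as a quick editorial sketch (the pure function below is hypothetical, not part of this patch): only 408, 5xx other than 501/505, and 404 when the secondary location was used are treated as retryable.

    # Editorial sketch: is_retry's status-code decision table as a pure function.
    def should_retry(status, used_secondary=False):
        if 300 <= status < 500:
            # Only 408 (timeout), or 404 when reading from the secondary, retries.
            return status == 408 or (status == 404 and used_secondary)
        # 5xx retries, except 501 Not Implemented and 505 Version Not Supported.
        return status >= 500 and status not in (501, 505)

    assert should_retry(408) and should_retry(500) and should_retry(404, used_secondary=True)
    assert not should_retry(404) and not should_retry(501) and not should_retry(412)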
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+
+    This accepts both global configuration, and per-request configuration with
+    the "logging_enable" option.
+    """
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        http_request = request.http_request
+        options = request.context.options
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "sig=*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                # We don't want to log the binary data of a file upload.
+                if isinstance(http_request.body, types.GeneratorType):
+                    _LOGGER.debug("File upload")
+                else:
+                    _LOGGER.debug(str(http_request.body))
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request, response):
+        # type: (PipelineRequest, PipelineResponse, Any) -> None
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif response.http_response.headers.get("content-type", "").startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+                else:
+                    if response.context.options.get('stream', False):
+                        _LOGGER.debug("Body is streamable")
+                    else:
+                        _LOGGER.debug(response.http_response.text())
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request):
+        # type: (PipelineRequest, **Any) -> PipelineResponse
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request):
+        # type: (PipelineRequest) -> PipelineResponse
+        data_stream_total = request.context.get('data_stream_total') or \
+            request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current') or \
+            request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current') or \
+            request.context.options.pop('upload_stream_current', None)
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+        will_retry = is_retry(response, request.context.options.get('mode'))
+        if not will_retry and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif not will_retry and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            pipeline_obj.context['data_stream_total'] = data_stream_total
+            pipeline_obj.context['download_stream_current'] = download_stream_current
+            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A policy that computes and validates the Content-MD5 of request
+    and response bodies when content validation is requested.
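For reference, the Content-MD5 value this policy attaches is the base64-encoded MD5 digest of the request body; a self-contained check of that encoding (editorial sketch, not part of the patch):

    # Editorial sketch: the Content-MD5 header value computed for a small body.
    import base64
    import hashlib

    body = b"hello"
    content_md5 = base64.b64encode(hashlib.md5(body).digest()).decode('utf-8')  # nosec
    assert content_md5 == 'XUFAKrxLKna5cZ2REBfFkg=='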
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        md5 = hashlib.md5() # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError):
+                raise ValueError("Data should be bytes or a seekable file-like object.")
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request, response):
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError(
+                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
+                        response.http_response.headers['content-md5'], computed_md5),
+                    response=response.http_response
+                )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    def __init__(self, **kwargs):
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param dict settings:
+            The retry settings, containing the current location mode and the
+            available host locations.
+        :param request:
+            The request to evaluate and possibly modify.
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if the body is not seekable, retry will not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
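The docstring's example numbers can be verified directly; a minimal sketch of the pre-jitter schedule, using a hypothetical standalone helper (not part of this patch):

    # Editorial sketch: ExponentialRetry's backoff before jitter is applied.
    def exponential_backoff(count, initial_backoff=15, increment_base=3):
        return initial_backoff + (0 if count == 0 else increment_base ** count)

    # First three attempts with the defaults: 15s, 15+3=18s, 15+9=24s.
    assert [exponential_backoff(c) for c in (0, 1, 2)] == [15, 18, 24]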
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A number, in seconds, indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A number, in seconds, indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A number, in seconds, indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A number, in seconds, indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py
new file mode 100644
index 0000000..4f15b65
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
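serialize_iso normalizes any datetime to a second-precision UTC timestamp with a 'Z' suffix; a standalone check of the format it emits (editorial sketch, not part of the patch):

    # Editorial sketch: the fixed format produced by serialize_iso.
    from datetime import datetime

    utc = datetime(2020, 8, 19, 11, 22, 1).utctimetuple()
    formatted = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z".format(
        utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec)
    assert formatted == '2020-08-19T11:22:01Z'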
+    """
+    if not attr:
+        return None
+    if isinstance(attr, str):
+        attr = isodate.parse_datetime(attr)
+    try:
+        utc = attr.utctimetuple()
+        if utc.tm_year > 9999 or utc.tm_year < 1:
+            raise OverflowError("Hit max or min date")
+
+        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+            utc.tm_year, utc.tm_mon, utc.tm_mday,
+            utc.tm_hour, utc.tm_min, utc.tm_sec)
+        return date + 'Z'
+    except (ValueError, OverflowError) as err:
+        msg = "Unable to serialize datetime object."
+        raise_with_traceback(ValueError, msg, err)
+    except AttributeError as err:
+        msg = "ISO-8601 object must be valid Datetime object."
+        raise_with_traceback(TypeError, msg, err)
+
+
+def get_length(data):
+    length = None
+    # Check if object implements the __len__ method, covers most input cases such as bytearray.
+    try:
+        length = len(data)
+    except:  # pylint: disable=bare-except
+        pass
+
+    if not length:
+        # Check if the stream is a file-like stream object.
+        # If so, calculate the size using the file descriptor.
+        try:
+            fileno = data.fileno()
+        except (AttributeError, UnsupportedOperation):
+            pass
+        else:
+            try:
+                return fstat(fileno).st_size
+            except OSError:
+                # Not a valid fileno; requests may have returned a socket number instead.
+                pass
+
+        # If the stream is seekable and tell() is implemented, calculate the stream size.
+        try:
+            current_position = data.tell()
+            data.seek(0, SEEK_END)
+            length = data.tell() - current_position
+            data.seek(current_position, SEEK_SET)
+        except (AttributeError, UnsupportedOperation):
+            pass
+
+    return length
+
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512 aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError("Invalid page blob start_range: {0}. "
+                             "The size must be aligned to a 512-byte boundary.".format(start_range))
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError("Invalid page blob end_range: {0}. "
+                             "The size must be aligned to a 512-byte boundary.".format(end_range))
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
+    elif start_range is not None:
+        range_header = "bytes={0}-".format(start_range)
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
+    return headers
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py
new file mode 100644
index 0000000..ac526e5
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py
@@ -0,0 +1,159 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    ResourceNotFoundError,
+    ResourceModifiedError,
+    ResourceExistsError,
+    ClientAuthenticationError,
+    DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.location_mode, deserialized + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message 
+= "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py new file mode 100644 index 0000000..367c655 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py @@ -0,0 +1,209 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . 
import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Services services:
+            Specifies the services accessible with the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py new file mode 100644 index 0000000..29949d5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py @@ -0,0 +1,568 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
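Before the upload machinery below, the chunking arithmetic is worth seeing on its own: a payload is cut into ceil(total/chunk_size) pieces, all full-sized except possibly the last. An editorial sketch of the block math used by get_substream_blocks later in this file (the helper name is illustrative, not part of the patch):

    # Editorial sketch: block-count arithmetic used by the chunked uploaders.
    from math import ceil

    def block_layout(total_size, chunk_size):
        blocks = int(ceil(total_size / float(chunk_size)))
        last = chunk_size if total_size % chunk_size == 0 else total_size % chunk_size
        return blocks, last

    # A 10 MiB stream with 4 MiB chunks: three blocks, the last one 2 MiB.
    assert block_layout(10 * 1024 * 1024, 4 * 1024 * 1024) == (3, 2 * 1024 * 1024)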
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        **kwargs)
+    if parallel:
+        executor = futures.ThreadPoolExecutor(max_concurrency)
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            executor.submit(with_current_context(uploader.process_chunk), u)
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        executor = futures.ThreadPoolExecutor(max_concurrency)
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            executor.submit(with_current_context(uploader.process_substream_block), u)
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+    return sorted(range_ids)
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_start = stream.tell() if parallel else None
+        self.stream_lock = Lock() if parallel else None
+
+        # Progress feedback
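+        # progress_total is read as upload_stream_current by each chunk request
+        # and updated after every chunk completes; when uploading in parallel it
+        # is guarded by progress_lock so concurrent workers do not lose updates.
+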
self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, block_id, block_stream): + range_id = self._upload_substream_block(block_id, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
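+        # Block IDs within one blob must all have the same length, so the
+        # offset is zero-padded to a fixed 32 digits before being encoded below.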
+        index = '{0:032d}'.format(chunk_offset)
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return index, block_id
+
+    def _upload_substream_block(self, block_id, block_stream):
+        try:
+            self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _is_chunk_empty(self, chunk_data):
+        # scan for a non-zero byte; the chunk is empty if none is found
+        return not any(bytearray(chunk_data))
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
+            computed_md5 = None
+            self.response_headers = self.service.upload_pages(
+                chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+
+class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = self.service.append_block(
+                chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = self.service.append_block(
+                chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # append the chunk at the given offset; data is not committed until flush
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+
+class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except (AttributeError, OSError, UnsupportedOperation):
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory,
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
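+                        # verify with tell() rather than trusting seek(): some
+                        # file-like objects accept the call without moving the cursor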
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+                if buffer_from_stream:
+                    # update the buffer with new data from the wrapped stream
+                    # we need to note down the start position and size of the buffer, in case seek is performed later
+                    self._buffer = BytesIO(buffer_from_stream)
+                    self._current_buffer_start = self._position
+                    self._current_buffer_size = len(buffer_from_stream)
+
+                    # read the remaining bytes from the new buffer and update position
+                    second_read_buffer = self._buffer.read(bytes_remaining)
+                    read_buffer += second_read_buffer
+                    self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence == SEEK_SET:
+            start_index = 0
+        elif whence == SEEK_CUR:
+            start_index = self._position
+        elif whence == SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, b):
+        raise UnsupportedOperation
+
+    def writelines(self, lines):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            # the generator is exhausted; everything buffered is returned below,
+            # so clear the leftover to avoid re-returning stale bytes next time
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py
new file mode 100644
index 0000000..29c0ee4
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py
@@ -0,0 +1,367 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_chunk(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await
uploader.process_substream_block(block)) + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, block_id, block_stream): + range_id = await self._upload_substream_block(block_id, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, block_id, block_stream): + try: + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, 
chunk_data):
+        self.response_headers = await self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+
+class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+        return range_id, response
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py
new file mode 100644
index 0000000..1186afa
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py
@@ -0,0 +1,349 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+
+from azure.multiapi.storagev2.blob.v2019_12_12 import generate_account_sas as generate_blob_account_sas
+from azure.multiapi.storagev2.blob.v2019_12_12 import generate_container_sas, generate_blob_sas
+if TYPE_CHECKING:
+    import datetime
+    from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \
+        UserDelegationKey
+
+
+def generate_account_sas(
+        account_name,  # type: str
+        account_key,  # type: str
+        resource_types,  # type: Union[ResourceTypes, str]
+        permission,  # type: Union[AccountSasPermissions, str]
+        expiry,  # type: Optional[Union[datetime, str]]
+        **kwargs  # type: Any
+    ):  # type: (...) -> str
+    """Generates a shared access signature for the DataLake service.
+
+    Use the returned signature as the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str account_key:
+        The access key to generate the shared access signature.
+    :param resource_types:
+        Specifies the resource types that are accessible with the account SAS.
+    :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_blob_account_sas(
+        account_name=account_name,
+        account_key=account_key,
+        resource_types=resource_types,
+        permission=permission,
+        expiry=expiry,
+        **kwargs
+    )
+
+
+def generate_file_system_sas(
+        account_name,  # type: str
+        file_system_name,  # type: str
+        credential,  # type: Union[str, UserDelegationKey]
+        permission=None,  # type: Optional[Union[FileSystemSasPermissions, str]]
+        expiry=None,  # type: Optional[Union[datetime, str]]
+        **kwargs  # type: Any
+    ):
+    # type: (...) -> str
+    """Generates a shared access signature for a file system.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str credential:
+        Credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered read, write, delete, list.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_container_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        **kwargs)
+
+
+def generate_directory_sas(
+        account_name,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        credential,  # type: Union[str, UserDelegationKey]
+        permission=None,  # type: Optional[Union[FileSasPermissions, str]]
+        expiry=None,  # type: Optional[Union[datetime, str]]
+        **kwargs  # type: Any
+    ):
+    # type: (...) -> str
+    """Generates a shared access signature for a directory.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param str credential:
+        Credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered read, write, delete, list.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=directory_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        **kwargs)
+
+
+def generate_file_sas(
+        account_name,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        file_name,  # type: str
+        credential,  # type: Union[str, UserDelegationKey]
+        permission=None,  # type: Optional[Union[FileSasPermissions, str]]
+        expiry=None,  # type: Optional[Union[datetime, str]]
+        **kwargs  # type: Any
+    ):
+    # type: (...) -> str
+    """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param str file_name:
+        The name of the file.
+    :param str credential:
+        Credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered read, write, delete, list.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if directory_name:
+        path = directory_name.rstrip('/') + "/" + file_name
+    else:
+        path = file_name
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=path,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        **kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py
new file mode 100644
index 0000000..d1a98dd
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py
@@ -0,0 +1,87 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from ._deserialize import (
+    process_storage_error)
+from ._generated.models import (
+    StorageErrorException,
+)
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+def upload_datalake_file(  # pylint: disable=unused-argument
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+        if not overwrite:
+            # if customers didn't specify access conditions, they cannot flush data to an existing file
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                cls=return_response_headers,
+                **kwargs)
+
+            # this modified_access_conditions will be applied to flush_data to make sure
+            # no other flush happens between the create and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
+        upload_data_chunks(
+            service=client,
+            uploader_class=DataLakeFileChunkUploader,
+            total_size=length,
+            chunk_size=chunk_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            **kwargs)
+
+        return client.flush_data(position=length,
+                                 path_http_headers=path_http_headers,
+                                 modified_access_conditions=modified_access_conditions,
+
cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py new file mode 100644 index 0000000..8a3a444 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.1.1" diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py new file mode 100644 index 0000000..c24dde8 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py @@ -0,0 +1,24 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._download_async import StorageStreamDownloader +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._data_lake_file_client_async import DataLakeFileClient +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._file_system_client_async import FileSystemClient +from ._data_lake_service_client_async import DataLakeServiceClient +from ._data_lake_lease_async import DataLakeLeaseClient + +__all__ = [ + 'DataLakeServiceClient', + 'FileSystemClient', + 'DataLakeDirectoryClient', + 'DataLakeFileClient', + 'DataLakeLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py new file mode 100644 index 0000000..8d4eb3e --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py @@ -0,0 +1,511 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from ._data_lake_file_client_async import DataLakeFileClient +from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase +from .._models import DirectoryProperties +from ._path_client_async import PathClient + + +class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): + """A client to interact with the DataLake directory, even if the directory may not yet exist. + + For operations relating to a specific subdirectory or file under the directory, a directory client or file client + can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. 
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}.
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name,  # pylint: disable=specify-parameter-names-in-call
+                                                      credential=credential, **kwargs)
+
+    async def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 8 + :caption: Create directory. + """ + return await self._create('directory', metadata=metadata, **kwargs) + + async def delete_directory(self, **kwargs): + # type: (...) -> None + """ + Marks the specified directory for deletion. + + :keyword lease: + Required if the directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 4 + :caption: Delete directory. + """ + return await self._delete(**kwargs) + + async def get_directory_properties(self, **kwargs): + # type: (**Any) -> DirectoryProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the directory. It does not return the content of the directory. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: DirectoryProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START get_directory_properties] + :end-before: [END get_directory_properties] + :language: python + :dedent: 4 + :caption: Getting the properties for a file/directory. + """ + blob_properties = await self._get_path_properties(**kwargs) + return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + + async def rename_directory(self, new_name, # type: str + **kwargs): + # type: (**Any) -> DataLakeDirectoryClient + """ + Rename the source directory. + + :param str new_name: + The new directory name to rename to. + The value must have the following format: "{filesystem}/{directory}/{subdirectory}". + :keyword source_lease: + A lease ID for the source path. If specified, + the source path must have an active lease and the lease ID must + match. + :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START rename_directory] + :end-before: [END rename_directory] + :language: python + :dedent: 4 + :caption: Rename the source directory. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + path = new_name[len(new_file_system):] + + new_directory_client = DataLakeDirectoryClient( + self.url, new_file_system, directory_name=path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_directory_client._rename_path('/' + self.file_system_name + '/' + self.path_name, # pylint: disable=protected-access + **kwargs) + return new_directory_client + + async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the directory has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766).
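A quick sanity check of the umask rule quoted above: the service's p & ^u is Python's p & ~u, so the documented 0777/0057 example can be verified directly:

    p, u = 0o777, 0o057
    assert p & ~u & 0o777 == 0o720  # matches the documented resulting permission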
+ :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory. + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.create_directory(metadata=metadata, **kwargs) + return subdir + + async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified subdirectory for deletion. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
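A hedged round-trip sketch for the subdirectory helpers described here, with directory_client as constructed earlier and an illustrative subdirectory name:

    sub_client = await directory_client.create_sub_directory("staging")
    # ... interact with sub_client ...
    await directory_client.delete_sub_directory("staging")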
+ :return: DataLakeDirectoryClient for the subdirectory + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.delete_directory(**kwargs) + return subdir + + async def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create a new file and return the file client to be interacted with. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + """ + file_client = self.get_file_client(file) + await file_client.create_file(**kwargs) + return file_client + + def get_file_client(self, file # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file: + The file with which to interact. 
This can either be the name of the file, + or an instance of FileProperties. e.g. directory/subdirectory/file + :type file: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_client] + :end-before: [END bsc_get_file_client] + :language: python + :dedent: 12 + :caption: Getting the file client to interact with a specific file. + """ + try: + file_path = file.name + except AttributeError: + file_path = self.path_name + '/' + file + + return DataLakeFileClient( + self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified subdirectory of the current directory. + + The subdirectory need not already exist. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_directory_client] + :end-before: [END bsc_get_directory_client] + :language: python + :dedent: 12 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + subdir_path = sub_directory.name + except AttributeError: + subdir_path = self.path_name + '/' + sub_directory + + return DataLakeDirectoryClient( + self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py new file mode 100644 index 0000000..e74cc13 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py @@ -0,0 +1,513 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from ._download_async import StorageStreamDownloader +from ._path_client_async import PathClient +from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase +from .._deserialize import process_storage_error +from .._generated.models import StorageErrorException +from .._models import FileProperties +from ..aio._upload_helper import upload_datalake_file + + +class DataLakeFileClient(PathClient, DataLakeFileClientBase): + """A client to interact with the DataLake file, even if the file may not yet exist. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. + :type file_system_name: str + :param file_path: + The whole file path, used to interact with a specific file, + e.g. "{directory}/{subdirectory}/{file}" + :type file_path: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py + :start-after: [START instantiate_file_client_from_conn_str] + :end-before: [END instantiate_file_client_from_conn_str] + :language: python + :dedent: 4 + :caption: Creating the DataLakeFileClient from connection string. + """ + + def __init__( + self, account_url, # type: str + file_system_name, # type: str + file_path, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, + credential=credential, **kwargs) + + async def create_file(self, content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create a new file. + + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766).
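A minimal construction-and-create sketch for the file client documented above, assuming the aio subpackage re-exports DataLakeFileClient; all names are illustrative:

    from azure.multiapi.storagev2.filedatalake.v2019_12_12.aio import DataLakeFileClient

    file_client = DataLakeFileClient(
        "https://myaccount.dfs.core.windows.net",
        file_system_name="myfilesystem",
        file_path="dir/subdir/report.txt",
        credential="<sas-token>")
    await file_client.create_file()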
+ :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 4 + :caption: Create file. + """ + return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) + + async def delete_file(self, **kwargs): + # type: (...) -> None + """ + Marks the specified file for deletion. + + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 4 + :caption: Delete file. + """ + return await self._delete(**kwargs) + + async def get_file_properties(self, **kwargs): + # type: (**Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. It does not return the content of the file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START get_file_properties] + :end-before: [END get_file_properties] + :language: python + :dedent: 4 + :caption: Getting the properties for a file. + """ + blob_properties = await self._get_path_properties(**kwargs) + return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + + async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + overwrite=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + """ + Upload data to a file. + + :param data: Content to be uploaded to the file. + :param int length: Size of the data in bytes. + :param bool overwrite: Whether to overwrite an existing file. + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :paramtype metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword int chunk_size: + The maximum chunk size for uploading a file in chunks. + Defaults to 100*1024*1024, or 100MB. + :return: response dict (Etag and last modified). + """ + options = self._upload_options( + data, + length=length, + overwrite=overwrite, + **kwargs) + return await upload_datalake_file(**options) + + async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + offset, # type: int + length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Append data to the file. + + :param data: Content to be appended to the file. + :param offset: The start position at which to append the data. + :param length: Size of the data in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :return: dict of the response headers. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START append_data] + :end-before: [END append_data] + :language: python + :dedent: 4 + :caption: Append data to the file.
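A hedged sketch of the append call documented above, paired with the flush_data call described next; the flush offset equals the total number of bytes written (file_client as constructed earlier):

    data = b"hello, datalake"
    await file_client.append_data(data, offset=0, length=len(data))
    await file_client.flush_data(len(data))  # commit the appended bytes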
+ """ + options = self._append_data_options( + data, + offset, + length=length, + **kwargs) + try: + return await self._client.path.append_data(**options) + except StorageErrorException as error: + process_storage_error(error) + + async def flush_data(self, offset, # type: int + retain_uncommitted_data=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ Commit the previous appended data. + + :param offset: offset is equal to the length of the file after commit the + previous appended data. + :param bool retain_uncommitted_data: Valid only for flush operations. If + "true", uncommitted data is retained after the flush operation + completes; otherwise, the uncommitted data is deleted after the flush + operation. The default is false. Data at offsets less than the + specified position are written to the file when flush succeeds, but + this optional parameter allows data after the flush position to be + retained for a future flush operation. + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword bool close: Azure Storage Events allow applications to receive + notifications when files change. When Azure Storage Events are + enabled, a file changed event is raised. This event has a property + indicating whether this is the final change to distinguish the + difference between an intermediate flush to a file stream and the + final close of a file stream. The close query parameter is valid only + when the action is "flush" and change notifications are enabled. If + the value of close is "true" and the flush operation completes + successfully, the service raises a file change notification with a + property indicating that this is the final update (the file stream has + been closed). If "false" a change notification is raised indicating + the file has changed. The default is false. This query parameter is + set to true by the Hadoop ABFS driver to indicate that the file stream + has been closed." + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :return: response header in dict + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START upload_file_to_file_system] + :end-before: [END upload_file_to_file_system] + :language: python + :dedent: 12 + :caption: Commit the previous appended data. 
+ """ + options = self._flush_data_options( + offset, + retain_uncommitted_data=retain_uncommitted_data, **kwargs) + try: + return await self._client.path.flush_data(**options) + except StorageErrorException as error: + process_storage_error(error) + + async def download_file(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a file to the StorageStreamDownloader. The readall() method must + be used to read all the content, or readinto() must be used to download the file into + a stream. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword lease: + If specified, download only succeeds if the file's lease is active + and matches this ID. Required if the file has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START read_file] + :end-before: [END read_file] + :language: python + :dedent: 4 + :caption: Return the downloaded data. + """ + downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) + return StorageStreamDownloader(downloader) + + async def rename_file(self, new_name, # type: str + **kwargs): + # type: (**Any) -> DataLakeFileClient + """ + Rename the source file. + + :param str new_name: the new file name the user want to rename to. + The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword source_lease: A lease ID for the source path. If specified, + the source path must have an active lease and the leaase ID must + match. 
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The renamed file client. + :rtype: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START rename_file] + :end-before: [END rename_file] + :language: python + :dedent: 4 + :caption: Rename the source file.
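A short usage sketch for rename_file as documented above; note that new_name carries the target file system as its first segment (values illustrative):

    renamed_client = await file_client.rename_file(
        "myfilesystem/archive/report-final.txt")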
+ """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + path = new_name[len(new_file_system):] + + new_directory_client = DataLakeFileClient( + self.url, new_file_system, file_path=path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_directory_client._rename_path('/' + self.file_system_name + '/' + self.path_name, # pylint: disable=protected-access + **kwargs) + return new_directory_client diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py new file mode 100644 index 0000000..9858f92 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py @@ -0,0 +1,243 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, + TypeVar, TYPE_CHECKING +) +from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobLeaseClient +from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase + + +if TYPE_CHECKING: + FileSystemClient = TypeVar("FileSystemClient") + DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") + DataLakeFileClient = TypeVar("DataLakeFileClient") + + +class DataLakeLeaseClient(DataLakeLeaseClientBase): + """Creates a new DataLakeLeaseClient. + + This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file system, directory, or file to lease. + :type client: ~azure.storage.filedatalake.aio.FileSystemClient or + ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. 
+ """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None + super(DataLakeLeaseClient, self).__init__(client, lease_id) + + if hasattr(client, '_blob_client'): + _client = client._blob_client # type: ignore # pylint: disable=protected-access + elif hasattr(client, '_container_client'): + _client = client._container_client # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") + + self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + async def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Optional[int], **Any) -> None + """Requests a new lease. + + If the file/file system does not have an active lease, the DataLake service creates a + lease on the file/file system and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) + self._update_lease_client_attributes() + + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the file system or file. Note that + the lease may be renewed even if it has expired as long as the file system + or file has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) + self._update_lease_client_attributes() + + async def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the file system or file has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the file system or file. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py new file mode 100644 index 0000000..a004499 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py @@ -0,0 +1,372 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation.
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from azure.core.paging import ItemPaged + +from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobServiceClient +from .._generated.aio import DataLakeStorageClient +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from ._file_system_client_async import FileSystemClient +from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase +from .._shared.policies_async import ExponentialRetry +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._data_lake_file_client_async import DataLakeFileClient +from ._models import FileSystemPropertiesPaged +from .._models import UserDelegationKey, LocationMode + + +class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): + """A client to interact with the DataLake Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete file systems within the account. + For operations relating to a specific file system, directory or file, clients for those entities + can also be retrieved using the `get_client` functions. + + :ivar str url: + The full endpoint URL to the datalake service endpoint. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URL to the DataLake storage account. Any other entities included + in the URL path (e.g. file system or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] + :language: python + :dedent: 4 + :caption: Creating the DataLakeServiceClient from connection string. + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 4 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...)
-> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(DataLakeServiceClient, self).__init__( + account_url, + credential=credential, + **kwargs + ) + self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) + self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access + self._client = DataLakeStorageClient(self.url, None, None, pipeline=self._pipeline) + self._loop = kwargs.get('loop', None) + + async def __aexit__(self, *args): + await self._blob_service_client.close() + await super(DataLakeServiceClient, self).__aexit__(*args) + + async def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be called when the client is used with a context manager. + """ + await self._blob_service_client.close() + await self.__aexit__() + + async def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.filedatalake.UserDelegationKey + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_user_delegation_key] + :end-before: [END get_user_delegation_key] + :language: python + :dedent: 8 + :caption: Get user delegation key from datalake service client. + """ + delegation_key = await self._blob_service_client.get_user_delegation_key( + key_start_time=key_start_time, + key_expiry_time=key_expiry_time, + **kwargs) # pylint: disable=protected-access + return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access + + def list_file_systems(self, name_starts_with=None, # type: Optional[str] + include_metadata=None, # type: Optional[bool] + **kwargs): + # type: (...) -> AsyncItemPaged[FileSystemProperties] + """Returns a generator to list the file systems under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all file systems have been returned. + + :param str name_starts_with: + Filters the results to return only file systems whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that file system metadata be returned in the response. + The default value is `False`. + :keyword int results_per_page: + The maximum number of file system names to retrieve per API + call. If the request does not specify a value, the server will return up to 5,000 items per page. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of FileSystemProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.FileSystemProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START list_file_systems] + :end-before: [END list_file_systems] + :language: python + :dedent: 8 + :caption: Listing the file systems in the datalake service.
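+ + A minimal inline sketch (``service_client`` is assumed to be an already constructed, authenticated ``DataLakeServiceClient``): + + .. code-block:: python + + # lazily iterate all file systems in the account, following + # continuation tokens behind the scenes + async for file_system in service_client.list_file_systems(): + print(file_system.name)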
+ """ + item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, + include_metadata=include_metadata, + **kwargs) # pylint: disable=protected-access + item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access + return item_paged + + async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> FileSystemClient + """Creates a new file system under the specified account. + + If the file system with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created file system. + + :param str file_system: + The name of the file system to create. + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: file system, file. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_file_system_from_service_client] + :end-before: [END create_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Creating a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) + return file_system_client + + async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] + **kwargs): + # type: (...) -> FileSystemClient + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :param file_system: + The file system to delete. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A FileSystemClient for the deleted file system. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START delete_file_system_from_service_client] + :end-before: [END delete_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Deleting a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + await file_system_client.delete_file_system(**kwargs) + return file_system_client + + def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] + ): + # type: (...) -> FileSystemClient + """Get a client to interact with the specified file system. + + The file system need not already exist. + + :param file_system: + The file system. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :returns: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + _configuration=self._config, + _pipeline=self._pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param file_system: + The file system that the directory is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_directory_client_from_service_client] + :end-before: [END get_directory_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the directory client to interact with a specific directory.
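+ + A minimal inline sketch (the file system and directory names are assumed placeholders): + + .. code-block:: python + + # no network call is made here; the client is only constructed + directory_client = service_client.get_directory_client("my-file-system", "my-directory") + await directory_client.create_directory()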
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + credential=self._raw_credential, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] + file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_system: + The file system that the file is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param file_path: + The file with which to interact. This can either be the full path of the file(from the root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_file_client_from_service_client] + :end-before: [END get_file_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the file client to interact with a specific file. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py new file mode 100644 index 0000000..2fda96f --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py @@ -0,0 +1,53 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from .._models import FileProperties + + +class StorageStreamDownloader(object): + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the file being downloaded. + :ivar ~azure.storage.filedatalake.FileProperties properties: + The properties of the file being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if speficied, + otherwise the total size of the file. 
+ """ + + def __init__(self, downloader): + self._downloader = downloader + self.name = self._downloader.name + self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access + self.size = self._downloader.size + + def __len__(self): + return self.size + + def chunks(self): + return self._downloader.chunks() + + async def readall(self): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes or str + """ + return await self._downloader.readall() + + async def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py new file mode 100644 index 0000000..5cb930d --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py @@ -0,0 +1,745 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Dict, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace + +from azure.core.async_paging import AsyncItemPaged + +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.multiapi.storagev2.blob.v2019_12_12.aio import ContainerClient + +from ._data_lake_file_client_async import DataLakeFileClient +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._models import PathPropertiesPaged +from ._data_lake_lease_async import DataLakeLeaseClient +from .._file_system_client import FileSystemClient as FileSystemClientBase +from .._generated.aio import DataLakeStorageClient +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.policies_async import ExponentialRetry +from .._models import FileSystemProperties, PublicAccess + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings) + + +class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): + """A client to interact with a specific file system, even if that file system + may not yet exist. + + For operations relating to a specific directory or file within this file system, a directory client or file client + can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. 
:type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + """ + + def __init__( + self, account_url, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(FileSystemClient, self).__init__( + account_url, + file_system_name=file_system_name, + credential=credential, + **kwargs) + # override the sync version of the class field _container_client + kwargs.pop('_hosts', None) + self._container_client = ContainerClient(self._blob_account_url, file_system_name, + credential=credential, + _hosts=self._container_client._hosts,# pylint: disable=protected-access + **kwargs) # type: ignore # pylint: disable=protected-access + self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) + self._loop = kwargs.get('loop', None) + + async def __aexit__(self, *args): + await self._container_client.close() + await super(FileSystemClient, self).__aexit__(*args) + + async def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be called when the client is used with a context manager. + """ + await self._container_client.close() + await self.__aexit__() + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> DataLakeLeaseClient + """ + Requests a new lease. If the file system does not have an active lease, + the DataLake service creates a lease on the file system and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A DataLakeLeaseClient object that can be used in a context manager. + :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the file system. + """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new file system under the specified account. + + If a file system with the same name already exists, a ResourceExistsError will + be raised. + + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dict with the new file system properties (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system] + :end-before: [END create_file_system] + :language: python + :dedent: 16 + :caption: Creating a file system in the datalake service. + """ + return await self._container_client.create_container(metadata=metadata, + public_access=public_access, + **kwargs) + + @distributed_trace_async + async def delete_file_system(self, **kwargs): + # type: (Any) -> None + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START delete_file_system] + :end-before: [END delete_file_system] + :language: python + :dedent: 16 + :caption: Deleting a file system in the datalake service. + """ + await self._container_client.delete_container(**kwargs) + + @distributed_trace_async + async def get_file_system_properties(self, **kwargs): + # type: (Any) -> FileSystemProperties + """Returns all user-defined metadata and system properties for the specified + file system. The data returned does not include the file system's list of paths. + + :keyword lease: + If specified, get_file_system_properties only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified file system within a file system object. + :rtype: ~azure.storage.filedatalake.FileSystemProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START get_file_system_properties] + :end-before: [END get_file_system_properties] + :language: python + :dedent: 16 + :caption: Getting properties on the file system. + """ + container_properties = await self._container_client.get_container_properties(**kwargs) + return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access + + @distributed_trace_async + async def set_file_system_metadata( # type: ignore + self, metadata, # type: Dict[str, str] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + file system. Each call to this operation replaces all existing metadata + attached to the file system. To remove all metadata from the file system, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the file system as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_file_system_metadata only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: file system-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START set_file_system_metadata] + :end-before: [END set_file_system_metadata] + :language: python + :dedent: 16 + :caption: Setting metadata on the file system. + """ + return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) + + @distributed_trace_async + async def set_file_system_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified file system or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a file system may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the file system. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the file system. + :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] + :param ~azure.storage.filedatalake.PublicAccess public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :keyword lease: + Required if the file system has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: file system-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + """ + return await self._container_client.set_container_access_policy(signed_identifiers, + public_access=public_access, **kwargs) + + @distributed_trace_async + async def get_file_system_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified file system.
+ The permissions indicate whether file system data may be accessed publicly. + + :keyword lease: + If specified, get_file_system_access_policy only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_policy = await self._container_client.get_container_access_policy(**kwargs) + return { + 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access + 'signed_identifiers': access_policy['signed_identifiers'] + } + + @distributed_trace + def get_paths(self, path=None, # type: Optional[str] + recursive=True, # type: Optional[bool] + max_results=None, # type: Optional[int] + **kwargs): + # type: (...) -> AsyncItemPaged[PathProperties] + """Returns a generator to list the paths (files or directories) under the specified file system. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str path: + Filters the results to return only paths under the specified path. + :param bool recursive: + Optional. Set True to list paths recursively, or False to list only the direct + children of the specified path. The default is True. + :param int max_results: + An optional value that specifies the maximum + number of items to return per page. If omitted or greater than 5,000, the + response will include up to 5,000 items per page. + :keyword upn: + Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :paramtype upn: bool + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of PathProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START get_paths_in_file_system] + :end-before: [END get_paths_in_file_system] + :language: python + :dedent: 12 + :caption: List the paths in the file system. + """ + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.file_system.list_paths, + path=path, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, recursive, path=path, max_results=max_results, + page_iterator_class=PathPropertiesPaged, **kwargs) + + @distributed_trace_async + async def create_directory(self, directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create directory + + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease.
Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_directory_from_file_system] + :end-before: [END create_directory_from_file_system] + :language: python + :dedent: 12 + :caption: Create directory in the file system. + """ + directory_client = self.get_directory_client(directory) + await directory_client.create_directory(metadata=metadata, **kwargs) + return directory_client + + @distributed_trace_async + async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified path for deletion. + + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START delete_directory_from_file_system] + :end-before: [END delete_directory_from_file_system] + :language: python + :dedent: 12 + :caption: Delete directory in the file system. + """ + directory_client = self.get_directory_client(directory) + await directory_client.delete_directory(**kwargs) + return directory_client + + @distributed_trace_async + async def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create file + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :paramtype metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_from_file_system] + :end-before: [END create_file_from_file_system] + :language: python + :dedent: 12 + :caption: Create file in the file system. + """ + file_client = self.get_file_client(file) + await file_client.create_file(**kwargs) + return file_client + + @distributed_trace_async + async def delete_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Marks the specified file for deletion. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START delete_file_from_file_system] + :end-before: [END delete_file_from_file_system] + :language: python + :dedent: 12 + :caption: Delete file in the file system.
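+ + A minimal inline sketch (``file_system_client`` and the file path are assumed placeholders): + + .. code-block:: python + + # returns the DataLakeFileClient that performed the deletion + file_client = await file_system_client.delete_file("folder/file.txt")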
+ """ + file_client = self.get_file_client(file) + await file_client.delete_file(**kwargs) + return file_client + + def _get_root_directory_client(self): + # type: () -> DataLakeDirectoryClient + """Get a client to interact with the root directory. + + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + """ + return self.get_directory_client('/') + + def get_directory_client(self, directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START get_directory_client_from_file_system] + :end-before: [END get_directory_client_from_file_system] + :language: python + :dedent: 12 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + + return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, + credential=self._raw_credential, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, + loop=self._loop + ) + + def get_file_client(self, file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_path: + The file with which to interact. This can either be the path of the file(from root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START get_file_client_from_file_system] + :end-before: [END get_file_client_from_file_system] + :language: python + :dedent: 12 + :caption: Getting the file client to interact with a specific file. + """ + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py new file mode 100644 index 0000000..9702ca6 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py @@ -0,0 +1,110 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines +from azure.core.async_paging import AsyncPageIterator +from azure.multiapi.storagev2.blob.v2019_12_12.aio._models import ContainerPropertiesPaged + +from .._deserialize import return_headers_and_deserialized_path_list, process_storage_error +from .._generated.models import StorageErrorException, Path +from .._models import PathProperties, FileSystemProperties + + +class FileSystemPropertiesPaged(ContainerPropertiesPaged): + """An Iterable of File System properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file system name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only file systems whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of file system names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + + def __init__(self, *args, **kwargs): + super(FileSystemPropertiesPaged, self).__init__( + *args, + **kwargs + ) + + @staticmethod + def _build_item(item): + return FileSystemProperties._from_generated(item) # pylint: disable=protected-access + + +class PathPropertiesPaged(AsyncPageIterator): + """An Iterable of Path properties. + + :ivar str path: Filters the results to return only paths under the specified path. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. + + :param callable command: Function to retrieve the next page of items. + :param str path: Filters the results to return only paths under the specified path. + :param int max_results: The maximum number of paths to retrieve per + call. + :param str continuation_token: An opaque continuation token.
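+ + This pager is normally obtained from ``FileSystemClient.get_paths`` rather than constructed directly; a minimal sketch (names are assumed placeholders): + + .. code-block:: python + + # each item is a PathProperties for a file or directory + async for path in file_system_client.get_paths(recursive=True): + print(path.name)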
+ """ + + def __init__( + self, command, + recursive, + path=None, + max_results=None, + continuation_token=None, + upn=None): + super(PathPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.recursive = recursive + self.results_per_page = max_results + self.path = path + self.upn = upn + self.current_page = None + self.path_list = None + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + self.recursive, + continuation=continuation_token or None, + path=self.path, + max_results=self.results_per_page, + upn=self.upn, + cls=return_headers_and_deserialized_path_list) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.path_list, self._response = get_next_return + self.current_page = [self._build_item(item) for item in self.path_list] + + return self._response['continuation'] or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, PathProperties): + return item + if isinstance(item, Path): + path = PathProperties._from_generated(item) # pylint: disable=protected-access + return path + return item diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py new file mode 100644 index 0000000..2fa9d3f --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py @@ -0,0 +1,490 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._path_client import PathClient as PathClientBase +from .._models import DirectoryProperties +from .._generated.aio import DataLakeStorageClient +from ._data_lake_lease_async import DataLakeLeaseClient +from .._generated.models import StorageErrorException +from .._deserialize import process_storage_error +from .._shared.policies_async import ExponentialRetry + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): + def __init__( + self, account_url, # type: str + file_system_name, # type: str + path_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + + super(PathClient, self).__init__(account_url, file_system_name, path_name, # type: ignore # pylint: disable=specify-parameter-names-in-call + credential=credential, + **kwargs) + + kwargs.pop('_hosts', None) + self._blob_client = BlobClient(self._blob_account_url, file_system_name, blob_name=path_name, + credential=credential, _hosts=self._blob_client._hosts, **kwargs) # type: ignore # pylint: disable=protected-access + self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) + self._loop = kwargs.get('loop', None) + + async def __aexit__(self, *args): + await self._blob_client.close() + await super(PathClient, self).__aexit__(*args) + + async def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be called when the client is used with a context manager. + """ + await self._blob_client.close() + await self.__aexit__() + + async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create directory or file + + :param resource_type: + Required for Create File and Create Directory. + The value must be "file" or "directory". Possible values include: + 'directory', 'file' + :type resource_type: str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file/directory as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Dict[str, Union[str, datetime]] + """ + options = self._create_path_options( + resource_type, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.path.create(**options) + except StorageErrorException as error: + process_storage_error(error) + + async def _delete(self, **kwargs): + # type: (bool, **Any) -> None + """ + Marks the specified path for deletion. + + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + options = self._delete_path_options(**kwargs) + try: + return await self._client.path.delete(**options) + except StorageErrorException as error: + process_storage_error(error) + + async def set_access_control(self, owner=None, # type: Optional[str] + group=None, # type: Optional[str] + permissions=None, # type: Optional[str] + acl=None, # type: Optional[str] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Set the owner, group, permissions, or access control list for a path. + + :param owner: + Optional. The owner of the file or directory. + :type owner: str + :param group: + Optional. The owning group of the file or directory. + :type group: str + :param permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + permissions and acl are mutually exclusive. + :type permissions: str + :param acl: + Sets POSIX access control rights on files and directories. 
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: response dict (Etag and last modified).
+        """
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return await self._client.path.set_access_control(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def get_access_control(self, upn=None,  # type: Optional[bool]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, or access control list for a path.
+
+        :param upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: response dict.
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return await self._client.path.get_properties(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _rename_path(self, rename_source,
+                           **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """
+        Rename a directory or file.
+
+        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties or FileProperties
+        """
+        path_properties = await self._blob_client.get_blob_properties(**kwargs)
+        path_properties.__class__ = DirectoryProperties
+        return path_properties
+
+    async def set_metadata(self, metadata,  # type: Dict[str, str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file or directory. Each call to this operation replaces all existing metadata
+        attached to the file or directory. To remove all metadata, call this
+        operation with an empty metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file or directory as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified).
+        """
+        return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+    async def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                               **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    async def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                            lease_id=None,  # type: Optional[str]
+                            **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds.
A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A DataLakeLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the file_system. + """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py new file mode 100644 index 0000000..93da7bf --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py @@ -0,0 +1,87 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
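+# --------------------------------------------------------------------------
+# upload_datalake_file (below) implements the create/append/flush sequence
+# behind the file client's upload_data method. When overwrite is enabled it
+# pins the etag returned by the create call into if_match, so no competing
+# writer can flush between the create and the final flush_data. A minimal
+# calling sketch, assuming a DataLakeFileClient named `file_client` and a
+# local file `data.bin` (both hypothetical):
+#
+#     async with file_client:
+#         with open('data.bin', 'rb') as stream:
+#             await file_client.upload_data(stream, overwrite=True)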
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from .._deserialize import ( + process_storage_error) +from .._generated.models import ( + StorageErrorException, +) +from .._shared.response_handlers import return_response_headers +from .._shared.uploads_async import ( + upload_data_chunks, + DataLakeFileChunkUploader) + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +async def upload_datalake_file( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + validate_content=None, + max_concurrency=None, + **kwargs): + try: + if length == 0: + return {} + properties = kwargs.pop('properties', None) + umask = kwargs.pop('umask', None) + permissions = kwargs.pop('permissions', None) + path_http_headers = kwargs.pop('path_http_headers', None) + modified_access_conditions = kwargs.pop('modified_access_conditions', None) + chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) + + if not overwrite: + # if customers didn't specify access conditions, they cannot flush data to existing file + if not _any_conditions(modified_access_conditions): + modified_access_conditions.if_none_match = '*' + if properties or umask or permissions: + raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") + + if overwrite: + response = await client.create( + resource='file', + path_http_headers=path_http_headers, + properties=properties, + modified_access_conditions=modified_access_conditions, + umask=umask, + permissions=permissions, + cls=return_response_headers, + **kwargs) + + # this modified_access_conditions will be applied to flush_data to make sure + # no other flush between create and the current flush + modified_access_conditions.if_match = response['etag'] + modified_access_conditions.if_none_match = None + modified_access_conditions.if_modified_since = None + modified_access_conditions.if_unmodified_since = None + + await upload_data_chunks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + **kwargs) + + return await client.flush_data(position=length, + path_http_headers=path_http_headers, + modified_access_conditions=modified_access_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py new file mode 100644 index 0000000..3266ae2 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py @@ -0,0 +1,68 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
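+# --------------------------------------------------------------------------
+# A minimal usage sketch for the clients re-exported below, assuming a
+# storage account named `myaccount` and an existing share `myshare` (both
+# hypothetical):
+#
+#     from azure.multiapi.storagev2.fileshare.v2019_12_12 import ShareServiceClient
+#
+#     service = ShareServiceClient(
+#         account_url="https://myaccount.file.core.windows.net",
+#         credential="<account-key>")
+#     share = service.get_share_client("myshare")
+#     print(share.get_share_properties().quota)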
+# -------------------------------------------------------------------------- + +from ._version import VERSION +from ._file_client import ShareFileClient +from ._directory_client import ShareDirectoryClient +from ._share_client import ShareClient +from ._share_service_client import ShareServiceClient +from ._lease import ShareLeaseClient +from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import ( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode) +from ._models import ( + ShareProperties, + DirectoryProperties, + Handle, + FileProperties, + Metrics, + RetentionPolicy, + CorsRule, + AccessPolicy, + FileSasPermissions, + ShareSasPermissions, + ContentSettings, + NTFSAttributes) +from ._generated.models import ( + HandleItem +) + +__version__ = VERSION + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageErrorCode', + 'Metrics', + 'RetentionPolicy', + 'CorsRule', + 'AccessPolicy', + 'FileSasPermissions', + 'ShareSasPermissions', + 'ShareProperties', + 'DirectoryProperties', + 'FileProperties', + 'ContentSettings', + 'Handle', + 'NTFSAttributes', + 'HandleItem', + 'generate_account_sas', + 'generate_share_sas', + 'generate_file_sas' +] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py new file mode 100644 index 0000000..5475e6d --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py @@ -0,0 +1,64 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
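+# --------------------------------------------------------------------------
+# Each deserializer below is handed to a generated operation through the
+# `cls=` response hook, which is invoked as cls(response, deserialized,
+# response_headers) and maps the raw headers onto the hand-written models.
+# An illustrative (hypothetical) call site:
+#
+#     props = self._client.share.get_properties(
+#         timeout=timeout, cls=deserialize_share_properties)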
+# -------------------------------------------------------------------------- + +from ._models import ShareProperties, DirectoryProperties, FileProperties +from ._shared.response_handlers import deserialize_metadata + + +def deserialize_share_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + share_properties = ShareProperties( + metadata=metadata, + **headers + ) + return share_properties + + +def deserialize_directory_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + directory_properties = DirectoryProperties( + metadata=metadata, + **headers + ) + return directory_properties + + +def deserialize_file_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = FileProperties( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties + + +def deserialize_file_stream(response, obj, headers): + file_properties = deserialize_file_properties(response, obj, headers) + obj.properties = file_properties + return response.location_mode, obj + + +def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission + ''' + + return obj.permission + + +def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission key + ''' + + if response is None or headers is None: + return None + return headers.get('x-ms-file-permission-key', None) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py new file mode 100644 index 0000000..f1c7c05 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py @@ -0,0 +1,706 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
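+# --------------------------------------------------------------------------
+# A minimal usage sketch for the client defined below, assuming a connection
+# string in `conn_str` and an existing share named `myshare` (both
+# hypothetical):
+#
+#     dir_client = ShareDirectoryClient.from_connection_string(
+#         conn_str, share_name="myshare", directory_path="parent/child")
+#     dir_client.create_directory()
+#     for item in dir_client.list_directories_and_files():
+#         print(item['name'])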
+# -------------------------------------------------------------------------- + +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import StorageErrorException +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._deserialize import deserialize_directory_properties +from ._serialize import get_api_version +from ._file_client import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, DirectoryProperties, ContentSettings + from ._generated.models import HandleItem + + +class ShareDirectoryClient(StorageAccountHostsMixin): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) 
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.directory_path = directory_path + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + @classmethod + def from_directory_url(cls, directory_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> ShareDirectoryClient + """Create a ShareDirectoryClient from a directory url. + + :param str directory_url: + The full URI to the directory. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + try: + if not directory_url.lower().startswith('http'): + directory_url = "https://" + directory_url + except AttributeError: + raise ValueError("Directory URL must be a string.") + parsed_url = urlparse(directory_url.rstrip('/')) + if not parsed_url.path and not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(directory_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + path_snapshot, _ = parse_query(parsed_url.query) + + share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') + share_name = unquote(share_name) + + directory_path = path_dir + snapshot = snapshot or path_snapshot + + return cls( + account_url=account_url, share_name=share_name, directory_path=directory_path, + credential=credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. 
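+
+        For example, with a hypothetical account `myaccount`, share `myshare`
+        and directory path `mydir`, the primary-location URL would be
+        "https://myaccount.file.core.windows.net/myshare/mydir" plus any SAS
+        query string.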
+        """
+        share_name = self.share_name
+        if isinstance(share_name, six.text_type):
+            share_name = share_name.encode('UTF-8')
+        directory_path = ""
+        if self.directory_path:
+            directory_path = "/" + quote(self.directory_path, safe='~')
+        return "{}://{}/{}{}{}".format(
+            self.scheme,
+            hostname,
+            quote(share_name),
+            directory_path,
+            self._query_str)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            share_name,  # type: str
+            directory_path,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareDirectoryClient
+        """Create ShareDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str directory_path:
+            The directory path.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string or an account
+            shared access key.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
+
+    def get_file_client(self, file_name, **kwargs):
+        # type: (str, Any) -> ShareFileClient
+        """Get a client to interact with a specific file.
+
+        The file need not already exist.
+
+        :param file_name:
+            The name of the file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if self.directory_path:
+            file_name = self.directory_path.rstrip('/') + "/" + file_name
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ShareFileClient(
+            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs)
+
+    def get_subdirectory_client(self, directory_name, **kwargs):
+        # type: (str, Any) -> ShareDirectoryClient
+        """Get a client to interact with a specific subdirectory.
+
+        The subdirectory need not already exist.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START get_subdirectory_client]
+                :end-before: [END get_subdirectory_client]
+                :language: python
+                :dedent: 12
+                :caption: Gets the subdirectory client.
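+
+        Subdirectory clients can also be chained to reach deeper paths, for
+        example (directory names hypothetical)::
+
+            grandchild = parent.get_subdirectory_client("a").get_subdirectory_client("b")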
+ """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode, **kwargs) + + @distributed_trace + def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 12 + :caption: Creates a directory. + """ + timeout = kwargs.pop('timeout', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 12 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + self._client.directory.delete(timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], **Any) -> ItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 12 + :caption: List directories and files. 
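+
+        The pager is lazy: listing calls are only issued, page by page, as the
+        result is iterated. A sketch (names hypothetical)::
+
+            for entry in dir_client.list_directories_and_files(name_starts_with="logs"):
+                print(entry['name'])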
+ """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> ItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace + def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace + def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace + def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. 
+ :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_subdirectory( + self, directory_name, # type: str + **kwargs): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 12 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace + def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 12 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace + def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. 
+ :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 12 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace + def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 12 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py new file mode 100644 index 0000000..8a86027 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py @@ -0,0 +1,522 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import sys +import threading +import warnings +from io import BytesIO + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.common import with_current_context +from ._shared.encryption import decrypt_blob +from ._shared.request_handlers import validate_and_format_range_headers +from ._shared.response_handlers import process_storage_error, parse_length_from_content_range + + +def process_range_and_offset(start_range, end_range, length, encryption): + start_offset, end_offset = 0, 0 + if encryption.get("key") is not None or encryption.get("resolver") is not None: + if start_range is not None: + # Align the start of the range along a 16 byte block + start_offset = start_range % 16 + start_range -= start_offset + + # Include an extra 16 bytes for the IV if necessary + # Because of the previous offsetting, start_range will always + # be a multiple of 16. + if start_range > 0: + start_offset += 16 + start_range -= 16 + + if length is not None: + # Align the end of the range along a 16 byte block + end_offset = 15 - (end_range % 16) + end_range += end_offset + + return (start_range, end_range), (start_offset, end_offset) + + +def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = b"".join(list(data)) + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if content and encryption.get("key") is not None or encryption.get("resolver") is not None: + try: + return decrypt_blob( + encryption.get("required"), + encryption.get("key"), + encryption.get("resolver"), + content, + start_offset, + end_offset, + data.response.headers, + ) + except Exception as error: + raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) + return content + + +class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + client=None, + total_size=None, + chunk_size=None, + current_progress=None, + start_range=None, + end_range=None, + stream=None, + parallel=None, + validate_content=None, + encryption_options=None, + **kwargs + ): + self.client = client + + # Information on the download range/chunk size + self.chunk_size = chunk_size + self.total_size = total_size + self.start_index = start_range + self.end_index = end_range + + # The destination that we will write to + self.stream = stream + self.stream_lock = threading.Lock() if parallel else None + self.progress_lock = threading.Lock() if parallel else None + + # For a parallel download, the stream is always seekable, so we note down the current position + # in order to seek to the right place when out-of-order chunks come in + self.stream_start = stream.tell() if parallel else None + + # Download progress so far + self.progress_total = current_progress + + # Encryption + self.encryption_options = encryption_options + + # Parameters for each get operation + self.validate_content = validate_content + self.request_options = kwargs + + def _calculate_range(self, chunk_start): + if chunk_start + self.chunk_size > self.end_index: + chunk_end = self.end_index + else: + chunk_end = chunk_start + self.chunk_size + return chunk_start, chunk_end + + def get_chunk_offsets(self): + index = self.start_index + while index < self.end_index: + yield index + index += self.chunk_size + + def process_chunk(self, chunk_start): + 
chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length):
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+    def _write_to_stream(self, chunk_data, chunk_start):
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _download_chunk(self, chunk_start, chunk_end):
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options
+        )
+        range_header, range_validation = validate_and_format_range_headers(
+            download_range[0], download_range[1], check_content_md5=self.validate_content
+        )
+
+        try:
+            _, response = self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            )
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in a file download stream."""
+
+    def __init__(self, size, content, downloader):
+        self.size = size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            chunk = next(self._iter_chunks)
+            self._current_content = self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+    next = __next__  # Python 2 compatibility.
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar str path:
+        The full path of the file.
+    :ivar str share:
+        The name of the share where the file is.
+    :ivar ~azure.storage.fileshare.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
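+
+    A minimal usage sketch, assuming a ShareFileClient named `file_client`
+    (hypothetical)::
+
+        downloader = file_client.download_file()
+        data = downloader.readall()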
+ """ + + def __init__( + self, + client=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + path=None, + share=None, + encoding=None, + **kwargs + ): + self.name = name + self.path = path + self.share = share + self.properties = None + self.size = None + + self._client = client + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + try: + location_mode, response = self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    )
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size == self.size:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _ChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+        return _ChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    def readall(self):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return self.readall()
+
+    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." + if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _ChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + if parallel: + import concurrent.futures + executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) + list(executor.map( + with_current_context(downloader.process_chunk), + downloader.get_chunk_offsets() + )) + else: + for chunk in downloader.get_chunk_offsets(): + downloader.process_chunk(chunk) + return self.size + + def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py new file mode 100644 index 0000000..3110a19 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py @@ -0,0 +1,1328 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines +import functools +import time +from io import BytesIO +from typing import ( # pylint: disable=unused-import + Optional, Union, IO, List, Dict, Any, Iterable, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import StorageErrorException, FileHTTPHeaders +from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, get_length +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._lease import ShareLeaseClient +from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version +from ._deserialize import deserialize_file_properties, deserialize_file_stream +from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import +from ._download import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, ContentSettings, FileProperties, Handle + from ._generated.models import HandleItem + + +def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = client.create_file( + size, + content_settings=content_settings, + metadata=metadata, + timeout=timeout, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + **kwargs + ) + if size == 0: + return response + + responses = upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except StorageErrorException as error: + process_storage_error(error) + + +class ShareFileClient(StorageAccountHostsMixin): + """A client to interact with a specific file, although that file may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. + :param str snapshot: + An optional file snapshot on which to operate. 
This can be the snapshot ID string
        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credential with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string or an account
+        shared access key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-12-12'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+    def __init__( # type: ignore
+            self, account_url,  # type: str
+            share_name,  # type: str
+            file_path,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not (share_name and file_path):
+            raise ValueError("Please specify a share name and file name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+        if hasattr(credential, 'get_token'):
+            raise ValueError("Token credentials not supported by the File service.")
+
+        path_snapshot = None
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        try:
+            self.snapshot = snapshot.snapshot  # type: ignore
+        except AttributeError:
+            try:
+                self.snapshot = snapshot['snapshot']  # type: ignore
+            except TypeError:
+                self.snapshot = snapshot or path_snapshot
+
+        self.share_name = share_name
+        self.file_path = file_path.split('/')
+        self.file_name = self.file_path[-1]
+        self.directory_path = "/".join(self.file_path[:-1])
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
+        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
+
+    @classmethod
+    def from_file_url(
+            cls, file_url,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareFileClient
+        """A client to interact with a specific file, although that file may not yet exist.
+
+        :param str file_url: The full URI to the file.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string or an account
+            shared access key.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        try:
+            if not file_url.lower().startswith('http'):
+                file_url = "https://" + file_url
+        except AttributeError:
+            raise ValueError("File URL must be a string.")
+        parsed_url = urlparse(file_url.rstrip('/'))
+
+        if not (parsed_url.netloc and parsed_url.path):
+            raise ValueError("Invalid URL: {}".format(file_url))
+        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
+
+        path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
+        path_snapshot, _ = parse_query(parsed_url.query)
+        snapshot = snapshot or path_snapshot
+        share_name = unquote(path_share)
+        file_path = '/'.join([unquote(p) for p in path_file.split('/')])
+        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+        """
+        share_name = self.share_name
+        if isinstance(share_name, six.text_type):
+            share_name = share_name.encode('UTF-8')
+        return "{}://{}/{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(share_name),
+            "/".join([quote(p, safe='~') for p in self.file_path]),
+            self._query_str)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            share_name,  # type: str
+            file_path,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareFileClient
+        """Create ShareFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str file_path:
+            The file path.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string or an account
+            shared access key.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START create_file_client]
+                :end-before: [END create_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Creates the file client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
+
+    @distributed_trace
+    def acquire_lease(self, lease_id=None, **kwargs):
+        # type: (Optional[str], **Any) -> ShareLeaseClient
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            ..
literalinclude:: ../samples/blob_samples_common.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a blob. + """ + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_file( # type: ignore + self, size, # type: int + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Creates a new file. + + Note that it only initializes the file with no content. + + :param int size: Specifies the maximum size for the file, + up to 1 TB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 12 + :caption: Create a file. 
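+
+            A minimal inline sketch of the same call (``file_client`` is an
+            assumed, pre-existing ``ShareFileClient``)::
+
+                file_client.create_file(size=1024)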
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. 
+ :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 12 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs) + + @distributed_trace + def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. 
+ Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client.py
                :start-after: [START copy_file_from_url]
                :end-before: [END copy_file_from_url]
                :language: python
                :dedent: 12
                :caption: Copy a file from a URL
+        """
+        metadata = kwargs.pop('metadata', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs.update(get_smb_properties(kwargs))
+        try:
+            return self._client.file.start_copy(
+                source_url,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                headers=headers,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def abort_copy(self, copy_id, **kwargs):
+        # type: (Union[str, FileProperties], Any) -> None
+        """Abort an ongoing copy operation.
+
+        This will leave a destination file with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID, or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            copy_id = copy_id.copy.id
+        except AttributeError:
+            try:
+                copy_id = copy_id['copy_id']
+            except TypeError:
+                pass
+        try:
+            self._client.file.abort_copy(copy_id=copy_id,
+                                         lease_access_conditions=access_conditions,
+                                         timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def download_file(
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Iterable[bytes]
+        """Downloads a file to a stream with automatic chunking.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable data generator (stream)
+
+        ..
admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START download_file] + :end-before: [END download_file] + :language: python + :dedent: 12 + :caption: Download a file. + """ + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + + range_end = None + if length is not None: + range_end = offset + length - 1 # Service actually uses an end-range inclusive index + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + return StorageStreamDownloader( + client=self._client.file, + config=self._config, + start_range=offset, + end_range=range_end, + encryption_options=None, + name=self.file_name, + path='/'.join(self.file_path), + share=self.share_name, + lease_access_conditions=access_conditions, + cls=deserialize_file_stream, + **kwargs) + + @distributed_trace + def delete_file(self, **kwargs): + # type: (Any) -> None + """Marks the specified file for deletion. The file is + later deleted during garbage collection. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 12 + :caption: Delete a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_file_properties(self, **kwargs): + # type: (Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = '/'.join(self.file_path) + return file_props # type: ignore + + @distributed_trace + def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop('size', None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
+        try:
+            return self._client.file.set_http_headers(  # type: ignore
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=_str(file_attributes),
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_file_metadata(self, metadata=None, **kwargs):
+        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return self._client.file.set_metadata(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def upload_range(  # type: ignore
+            self, data,  # type: bytes
+            offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate.
Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + if isinstance(data, six.text_type): + data = data.encode(encoding) + + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + return self._client.file.upload_range( # type: ignore + range=content_range, + content_length=length, + optionalbody=data, + timeout=timeout, + validate_content=validate_content, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @staticmethod + def _upload_range_from_url_options(source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + + if offset is None: + raise ValueError("offset must be provided.") + if length is None: + raise ValueError("length must be provided.") + if source_offset is None: + raise ValueError("source_offset must be provided.") + + # Format range + end_range = offset + length - 1 + destination_range = 'bytes={0}-{1}'.format(offset, end_range) + source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) + + source_mod_conditions = get_source_conditions(kwargs) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + options = { + 'copy_source': source_url, + 'content_length': 0, + 'source_range': source_range, + 'range': destination_range, + 'source_modified_access_conditions': source_mod_conditions, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_range_from_url(self, source_url, + offset, + length, + source_offset, + **kwargs + ): + # type: (str, int, int, int, **Any) -> Dict[str, Any] + """ + Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. + + :param int offset: + Start of byte range to use for updating a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for updating a section of the file. + The range can be up to 4 MB in size. + :param str source_url: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. 
+ Examples: + https://myaccount.file.core.windows.net/myshare/mydir/myfile + https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return self._client.file.upload_range_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid ranges of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A list of valid ranges. 
+ :rtype: List[dict[str, int]] + """ + timeout = kwargs.pop('timeout', None) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Unsupported method for encryption.") + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + content_range = None + if offset is not None: + if length is not None: + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + else: + content_range = 'bytes={0}-'.format(offset) + try: + ranges = self._client.file.get_range_list( + range=content_range, + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return [{'start': b.start, 'end': b.end} for b in ranges] + + @distributed_trace + def clear_range( # type: ignore + self, offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Clears the specified range and releases the space used in storage for + that range. + + :param int offset: + Start of byte range to use for clearing a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for clearing a section of the file. + The range can be up to 4 MB in size. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Unsupported method for encryption.") + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 bytes file size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 bytes file size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + try: + return self._client.file.upload_range( # type: ignore + timeout=timeout, + cls=return_response_headers, + content_length=0, + file_range_write="clear", + range=content_range, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def resize_file(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Resizes a file to the specified size. + + :param int size: + Size to resize file to (in bytes) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+ :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return self._client.file.set_http_headers( # type: ignore + file_content_length=size, + file_attributes="preserve", + file_creation_time="preserve", + file_last_write_time="preserve", + file_permission="preserve", + lease_access_conditions=access_conditions, + cls=return_response_headers, + timeout=timeout, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_handles(self, **kwargs): + # type: (Any) -> ItemPaged[Handle] + """Lists handles for file. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.file.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace + def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py new file mode 100644 index 0000000..22b5762 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._azure_file_storage import AzureFileStorage +__all__ = ['AzureFileStorage'] + +from .version import VERSION + +__version__ = VERSION + diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py new file mode 100644 index 0000000..e3dd92c --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core import PipelineClient +from msrest import Serializer, Deserializer + +from ._configuration import AzureFileStorageConfiguration +from azure.core.exceptions import map_error +from .operations import ServiceOperations +from .operations import ShareOperations +from .operations import DirectoryOperations +from .operations import FileOperations +from . 
import models + + +class AzureFileStorage(object): + """AzureFileStorage + + + :ivar service: Service operations + :vartype service: azure.storage.fileshare.operations.ServiceOperations + :ivar share: Share operations + :vartype share: azure.storage.fileshare.operations.ShareOperations + :ivar directory: Directory operations + :vartype directory: azure.storage.fileshare.operations.DirectoryOperations + :ivar file: File operations + :vartype file: azure.storage.fileshare.operations.FileOperations + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + """ + + def __init__(self, version, url, **kwargs): + + base_url = '{url}' + self._config = AzureFileStorageConfiguration(version, url, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2019-12-12' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + def close(self): + self._client.close() + def __enter__(self): + self._client.__enter__() + return self + def __exit__(self, *exc_details): + self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py new file mode 100644 index 0000000..d638b1e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from .version import VERSION + + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage + Note that all parameters used to create this instance are saved as instance + attributes. + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + :ivar file_range_write_from_url: Only update is supported: - Update: + Writes the bytes downloaded from the source url into the specified range. 
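The generated AzureFileStorage client above is the internal REST layer that the hand-written ShareFileClient and related clients drive; it can also be constructed directly, for example when debugging. A sketch with a placeholder account URL (credential and authentication policies are omitted):

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated import AzureFileStorage

    # '<account>' is a placeholder; real requests also need an auth policy.
    with AzureFileStorage(version="2019-12-12", url="https://<account>.file.core.windows.net") as client:
        print(client.api_version)  # '2019-12-12'
        # client.service, client.share, client.directory and client.file
        # expose the raw operations used by the public clients.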
+ :type file_range_write_from_url: str + """ + + def __init__(self, version, url, **kwargs): + + if version is None: + raise ValueError("Parameter 'version' must not be None.") + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + self._configure(**kwargs) + + self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) + self.generate_client_request_id = True + + self.version = version + self.url = url + self.file_range_write_from_url = "update" + + def _configure(self, **kwargs): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py new file mode 100644 index 0000000..942d3c5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._azure_file_storage_async import AzureFileStorage +__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py new file mode 100644 index 0000000..39cf463 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core import AsyncPipelineClient +from msrest import Serializer, Deserializer + +from ._configuration_async import AzureFileStorageConfiguration +from azure.core.exceptions import map_error +from .operations_async import ServiceOperations +from .operations_async import ShareOperations +from .operations_async import DirectoryOperations +from .operations_async import FileOperations +from .. 
import models + + +class AzureFileStorage(object): + """AzureFileStorage + + + :ivar service: Service operations + :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations + :ivar share: Share operations + :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations + :ivar directory: Directory operations + :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations + :ivar file: File operations + :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + """ + + def __init__( + self, version, url, **kwargs): + + base_url = '{url}' + self._config = AzureFileStorageConfiguration(version, url, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2019-12-12' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def close(self): + await self._client.close() + async def __aenter__(self): + await self._client.__aenter__() + return self + async def __aexit__(self, *exc_details): + await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py new file mode 100644 index 0000000..75c206e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from ..version import VERSION + + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage + Note that all parameters used to create this instance are saved as instance + attributes. + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + :ivar file_range_write_from_url: Only update is supported: - Update: + Writes the bytes downloaded from the source url into the specified range. 
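The async flavor above mirrors the sync client but should be entered with `async with` so the underlying AsyncPipelineClient transport is opened and closed cleanly; a sketch with a placeholder URL:

    import asyncio
    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.aio import AzureFileStorage

    async def main():
        # Placeholder URL; real requests also need an authentication policy.
        async with AzureFileStorage(version="2019-12-12", url="https://<account>.file.core.windows.net") as client:
            print(client.api_version)

    asyncio.run(main())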
+ :type file_range_write_from_url: str + """ + + def __init__(self, version, url, **kwargs): + + if version is None: + raise ValueError("Parameter 'version' must not be None.") + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + self._configure(**kwargs) + + self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) + self.generate_client_request_id = True + self.accept_language = None + + self.version = version + self.url = url + self.file_range_write_from_url = "update" + + def _configure(self, **kwargs): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py new file mode 100644 index 0000000..601c709 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py @@ -0,0 +1,22 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations_async import ServiceOperations +from ._share_operations_async import ShareOperations +from ._directory_operations_async import DirectoryOperations +from ._file_operations_async import FileOperations + +__all__ = [ + 'ServiceOperations', + 'ShareOperations', + 'DirectoryOperations', + 'FileOperations', +] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py new file mode 100644 index 0000000..30aea57 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py @@ -0,0 +1,672 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class DirectoryOperations: + """DirectoryOperations async operations. 
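Because `_configure` above only falls back to a default when the corresponding keyword is absent, callers can inject their own azure-core pipeline policies at construction time; a sketch (the retry count is arbitrary, the URL a placeholder):

    from azure.core.pipeline import policies
    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.aio import AzureFileStorage

    client = AzureFileStorage(
        version="2019-12-12",
        url="https://<account>.file.core.windows.net",
        retry_policy=policies.AsyncRetryPolicy(retry_total=10),  # replaces the default
    )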
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: . Constant value: "directory".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.restype = "directory"
+
+    async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs):
+        """Creates a new directory under the specified share or parent directory.
+
+        :param file_attributes: If specified, the provided file attributes
+         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+         directory. ‘None’ can also be specified as default.
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory.
+         Default value: Now.
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory.
+         Default value: Now.
+        :type file_last_write_time: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param file_permission: If specified, the permission (security
+         descriptor) shall be set for the directory/file. This header can be
+         used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit.
+         If SDDL is specified as input, it must have owner, group and dacl.
+         Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param file_permission_key: Key of the permission to be set for the
+         directory/file. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
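As the note above says, the inline SDDL permission and the permission key are mutually exclusive. A sketch through the public ShareDirectoryClient, with placeholder names and an abbreviated, illustrative SDDL string (keyword support as documented for this API version):

    from azure.storage.fileshare import ShareDirectoryClient

    dir_client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="newdir")

    # Either pass the security descriptor inline (illustrative SDDL) ...
    dir_client.create_directory(file_permission="O:SYG:SYD:(A;;FA;;;SY)")
    # ... or pass file_permission_key= with a key previously registered via
    # ShareClient.create_permission_for_share; supplying both is rejected.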
+ :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata 
= {'url': '/{shareName}/{directory}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): + """Returns all system properties for the specified directory, and can also + be used to check the existence of a directory. The data returned does + not include the files in the directory or any subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return 
cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}'} + + async def delete(self, timeout=None, *, cls=None, **kwargs): + """Removes the specified empty directory. Note that the directory must be + empty before it can be deleted. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}'} + + async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): + """Sets properties on the directory. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. 
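A note on the recurring `cls` parameter: each operation hands it the raw response, the deserialized body (None for header-only calls such as delete above), and the parsed response headers, which is how the wrapping clients surface ETag and Last-Modified values. A minimal sketch, assuming `ops` is an already-constructed DirectoryOperations instance:

    # Return the parsed headers instead of the default None:
    def return_headers(raw_response, deserialized, headers):
        return headers

    # headers = await ops.set_metadata(metadata="key=value", cls=return_headers)
    # headers['ETag'], headers['x-ms-request-id'], ...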
+ :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{shareName}/{directory}'} + + async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}'} + + async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs): + """Returns a list of files or directories under the specified share or + directory. It lists the contents only for a single level of the + directory hierarchy. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. 
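The segment call below is the raw paging primitive: it is re-issued with the marker returned by the previous response until that marker comes back empty (the public clients wrap this in ItemPaged). A sketch, assuming `ops` is a generated DirectoryOperations instance and that the response model exposes `segment` and `next_marker` as in this API version:

    async def iter_entries(ops):
        marker = None
        while True:
            page = await ops.list_files_and_directories_segment(
                maxresults=1000, marker=marker)
            for item in page.segment.directory_items + page.segment.file_items:
                yield item
            marker = page.next_marker
            if not marker:
                break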
+ :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListFilesAndDirectoriesSegmentResponse or the result of + cls(response) + :rtype: + ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_files_and_directories_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return 
deserialized
+    list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'}
+
+    async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs):
+        """Lists handles for the directory.
+
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5,000, the server will return up to 5,000 items.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the
+         directory specified in the URI, its files, its subdirectories and
+         their files.
+        :type recursive: bool
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "listhandles"
+
+        # Construct URL
+        url = self.list_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        if recursive is not None:
+            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ListHandlesResponse', response)
+            header_dict = {
+                'Content-Type': self._deserialize('str',
response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_handles.metadata = {'url': '/{shareName}/{directory}'}
+
+    async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs):
+        """Closes all handles open for the given directory.
+
+        :param handle_id: Specifies the handle ID opened on the file or directory
+         to be closed. Asterisk (‘*’) is a wildcard that specifies all handles.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the
+         directory specified in the URI, its files, its subdirectories and
+         their files.
+        :type recursive: bool
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "forceclosehandles"
+
+        # Construct URL
+        url = self.force_close_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
+        if recursive is not None:
+            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response,
self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
+                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
+                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    force_close_handles.metadata = {'url': '/{shareName}/{directory}'}
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py
new file mode 100644
index 0000000..b8957df
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py
@@ -0,0 +1,1666 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class FileOperations:
+    """FileOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file".
+    :ivar x_ms_copy_action: . Constant value: "abort".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.x_ms_type = "file"
+        self.x_ms_copy_action = "abort"
+
+    async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs):
+        """Creates a new file or replaces a file. Note that it only initializes
+        the file, with no content.
+
+        :param file_content_length: Specifies the maximum size for the file,
+         up to 1 TB.
+        :type file_content_length: long
+        :param file_attributes: If specified, the provided file attributes
+         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+         directory. ‘None’ can also be specified as default.
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory.
+         Default value: Now.
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory.
+         Default value: Now.
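Unlike the directory variant, the file create call threads its content settings through a FileHTTPHeaders model and its lease through LeaseAccessConditions, matching the attribute reads in the body below; a sketch, assuming `ops` is a generated FileOperations instance (values are placeholders):

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated import models

    headers = models.FileHTTPHeaders(
        file_content_type="application/octet-stream",
        file_cache_control="no-cache",
    )
    # await ops.create(
    #     file_content_length=1024,      # reserves 1 KB; no bytes are written yet
    #     file_http_headers=headers,
    #     metadata="purpose=demo",       # sent as the x-ms-meta header
    # )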
+ :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = 
self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Reads or downloads a 
file from the system, including its metadata and + properties. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and + specified together with the Range header, the service returns the MD5 + hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns all user-defined metadata, standard HTTP properties, and system + properties for the file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
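+
+ A minimal usage sketch of this operation as reached through the public
+ async ShareFileClient wrapper that this package exports (the import
+ path, connection string, and share/file names are placeholder
+ assumptions, not part of the generated client)::
+
+     # Hypothetical wrapper-level call; adjust names to your account.
+     from azure.multiapi.storagev2.fileshare.v2019_12_12.aio import ShareFileClient
+
+     async def show_properties(conn_str):
+         async with ShareFileClient.from_connection_string(
+                 conn_str, share_name="myshare", file_path="dir/file.txt") as client:
+             props = await client.get_file_properties()
+             print(props.size, props.last_modified, props.etag)
+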
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': 
self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def delete(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Sets HTTP headers on the file. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If + the specified byte value is less than the current size of the file, + then all ranges above the specified byte value are cleared. + :type file_content_length: long + :param file_permission: If specified, the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
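+
+ A short wrapper-level sketch, assuming the ContentSettings model and
+ the set_http_headers method of the public ShareFileClient exported by
+ this package::
+
+     from azure.multiapi.storagev2.fileshare.v2019_12_12 import ContentSettings
+
+     async def set_headers(client):
+         # client: an async ShareFileClient as built in the earlier sketch.
+         await client.set_http_headers(content_settings=ContentSettings(
+             content_type="text/plain", cache_control="no-cache"))
+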
+ :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
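+
+ A sketch assuming the wrapper surfaces this as
+ ShareFileClient.acquire_lease, returning a lease client whose id guards
+ later writes::
+
+     async def lock_file(client):
+         # client: an async ShareFileClient as built in the earlier sketch.
+         lease = await client.acquire_lease()
+         print(lease.id)  # GUID of the newly acquired lease
+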
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def release_lease(self, lease_id, timeout=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
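+
+ The usual acquire/release pairing, under the same wrapper assumptions
+ as above::
+
+     async def hold_briefly(client):
+         lease = await client.acquire_lease()
+         try:
+             pass  # operate on the file while the lease is held
+         finally:
+             await lease.release()
+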
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
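+
+ Rotating the lease id via the lease client's change method (a sketch;
+ uuid is only used to build a well-formed GUID)::
+
+     import uuid
+
+     async def rotate_lease(client):
+         lease = await client.acquire_lease()
+         await lease.change(proposed_lease_id=str(uuid.uuid4()))
+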
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
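+
+ A sketch assuming a ShareLeaseClient bound to the file can break the
+ current lease without knowing its id::
+
+     from azure.multiapi.storagev2.fileshare.v2019_12_12.aio import ShareLeaseClient
+
+     async def force_unlock(client):
+         # No lease id is supplied; break targets whatever lease is active.
+         await ShareLeaseClient(client).break_lease()
+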
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the + start and end of the range must be specified. For an update operation, + the range can be up to 4 MB in size. For a clear operation, the range + can be up to the value of the file's full size. The File service + accepts only a single byte range for the Range and 'x-ms-range' + headers, and the byte range must be specified in the following format: + bytes=startByte-endByte. 
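+
+ A sketch of an update write through the wrapper, assuming the
+ offset/length form of ShareFileClient.upload_range and a file
+ pre-allocated with create_file::
+
+     async def write_prefix(client):
+         data = b"hello, world"
+         await client.create_file(size=1024)  # allocate before writing ranges
+         await client.upload_range(data, offset=0, length=len(data))
+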
+ :type range: str + :param file_range_write: Specify one of the following options: - + Update: Writes the bytes specified by the request body into the + specified range. The Range and Content-Length headers must match to + perform the update. - Clear: Clears the specified range and releases + the space used in storage for that range. To clear a range, set the + Content-Length header to zero, and set the Range header to a value + that indicates the range to clear, up to maximum file size. Possible + values include: 'update', 'clear' + :type file_range_write: str or + ~azure.storage.fileshare.models.FileRangeWriteType + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param optionalbody: Initial data. + :type optionalbody: Generator + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param content_md5: An MD5 hash of the content. This hash is used to + verify the integrity of the data during transport. When the + Content-MD5 header is specified, the File service compares the hash of + the content that has arrived with the header value that was sent. If + the two hashes do not match, the operation will fail with error code + 400 (Bad Request). + :type content_md5: bytearray + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Upload a range of bytes to a file where the contents are read from a + URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the + range of bytes that must be read from the copy source. 
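+
+ A wrapper-level sketch; source_url is a placeholder and must be
+ readable by the service (for example, a SAS URL)::
+
+     async def server_side_write(client, source_url):
+         # Copy 512 bytes from the start of the source into this file.
+         await client.upload_range_from_url(
+             source_url, offset=0, length=512, source_offset=0)
+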
+ :type source_content_crc64: bytearray + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + source_if_none_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') + if source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, + inclusively. + :type range: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.Range] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "rangelist" + + # Construct URL + url = self.get_range_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('[Range]', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified, the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
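+
+ Starting a copy through the wrapper (a sketch; the returned mapping is
+ assumed to expose copy_id and copy_status, and source_url is a
+ placeholder)::
+
+     async def copy_into(client, source_url):
+         copy = await client.start_copy_from_url(source_url)
+         print(copy["copy_id"], copy["copy_status"])  # e.g. 'pending'
+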
+ :type file_permission_key: str + :param copy_file_smb_info: Additional parameters for the operation + :type copy_file_smb_info: + ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_permission_copy_mode = None + if copy_file_smb_info is not None: + file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + ignore_read_only = None + if copy_file_smb_info is not None: + ignore_read_only = copy_file_smb_info.ignore_read_only + file_attributes = None + if copy_file_smb_info is not None: + file_attributes = copy_file_smb_info.file_attributes + file_creation_time = None + if copy_file_smb_info is not None: + file_creation_time = copy_file_smb_info.file_creation_time + file_last_write_time = None + if copy_file_smb_info is not None: + file_last_write_time = copy_file_smb_info.file_last_write_time + set_archive_attribute = None + if copy_file_smb_info is not None: + set_archive_attribute = copy_file_smb_info.set_archive_attribute + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') + if ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') + if file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + if file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + if file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Aborts a pending Copy File operation, and leaves a destination file + with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "copy" + + # Construct URL + url = self.abort_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs): + """Lists handles for the file. + + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query.
+ :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "listhandles" + + # Construct URL + url = self.list_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListHandlesResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs): + """Closes all handles open for the given file. + + :param handle_id: Specifies handle ID opened on the file or directory + to be closed. Asterisk ('*') is a wildcard that specifies all handles. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client.
+ :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py new file mode 100644 index 0000000..c4e40f1 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
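Before the service-level operations that follow, a hedged sketch of the copy lifecycle exposed by the FileOperations methods above. It is not part of this patch: `client` is assumed to be the generated async AzureFileStorage client, and the `file` attribute name is an assumption based on AutoRest conventions, not something shown in this diff.

async def start_then_abort(client, source_url):
    # start_copy returns None by default; the cls hook exposes the raw
    # response headers, including x-ms-copy-id and x-ms-copy-status.
    headers = await client.file.start_copy(
        copy_source=source_url,
        cls=lambda response, deserialized, hdrs: hdrs)
    # The status header may deserialize to a CopyStatusType enum; a plain
    # string comparison is assumed to work here.
    if headers['x-ms-copy-status'] == 'pending':
        # abort_copy leaves a zero-length destination file with full
        # metadata.
        await client.file.abort_copy(copy_id=headers['x-ms-copy-id'])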
+# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar restype: Constant value: "service". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "service" + + async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs): + """Sets properties for a storage account's File service endpoint, + including properties for Storage Analytics metrics and CORS + (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: + ~azure.storage.fileshare.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/'} + + async def get_properties(self, timeout=None, *, cls=None, **kwargs): +
"""Gets the properties of a storage account's File service, including + properties for Storage Analytics metrics and CORS (Cross-Origin + Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs): + """The List Shares Segment operation returns a list of the shares and + share snapshots under the specified account. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. 
+ :type include: list[str or + ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_shares_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListSharesResponse', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py new file mode 100644 index 0000000..3005625 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py @@ -0,0 +1,825 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
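A hedged sketch of driving list_shares_segment's marker-based paging by hand. The `service` attribute and the ListSharesResponse field names (share_items, next_marker) are assumptions based on AutoRest conventions, not taken from this diff.

async def print_all_shares(client):
    # Keep requesting pages until the service stops returning a
    # continuation marker.
    marker = None
    while True:
        page = await client.service.list_shares_segment(
            marker=marker, maxresults=100)
        for share in page.share_items:   # field name assumed
            print(share.name)
        marker = page.next_marker        # field name assumed
        if not marker:
            break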
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ShareOperations: + """ShareOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar restype: Constant value: "share". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "share" + + async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs): + """Creates a new share under the specified account. If a share with the + same name already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param quota: Specifies the maximum size of the share, in gigabytes. + :type quota: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): + """Returns all user-defined metadata and system properties for the + specified share or share snapshot. The data returned does not include + the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), + 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), + 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), + 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), + 'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}'} + + async def 
delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, *, cls=None, **kwargs): + """Marks the specified share or share snapshot for deletion. The + share or share snapshot, and any files contained within it, are later + deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param delete_snapshots: Specifies the 'include' option to delete the + base share and all of its snapshots. Possible values include: + 'include' + :type delete_snapshots: str or + ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}'} + + async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs): + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object.
+ :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_snapshot.metadata = {'url': '/{shareName}'} + + async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs): + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the + share level. + :type share_permission: + ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.create_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_permission.metadata = {'url': '/{shareName}'} + + async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs): + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be retrieved for + the directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations.
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.get_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SharePermission', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} + + async def set_quota(self, timeout=None, quota=None, *, cls=None, **kwargs): + """Sets quota for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. 
+ :type quota: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_quota.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_quota.metadata = {'url': '/{shareName}'} + + async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}'} + + async def get_access_policy(self, timeout=None, *, cls=None, **kwargs): + """Returns information about stored access policies specified on the + share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} + + async def set_access_policy(self, share_acl=None, timeout=None, *, cls=None, **kwargs): + """Sets a stored access policy for use with shared access signatures. + + :param share_acl: The ACL for the share. + :type share_acl: + list[~azure.storage.fileshare.models.SignedIdentifier] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "acl" + + # Construct URL + url = self.set_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{shareName}'} + + async def get_statistics(self, timeout=None, *, cls=None, **kwargs): + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ShareStats or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "stats" + + # Construct URL + url = self.get_statistics.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ShareStats', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_statistics.metadata = {'url': '/{shareName}'} + + async def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, *, cls=None, **kwargs): + """Restores a previously deleted share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param deleted_share_name: Specifies the name of the + previously-deleted share. + :type deleted_share_name: str + :param deleted_share_version: Specifies the version of the + previously-deleted share.
+        :type deleted_share_version: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "undelete"
+
+        # Construct URL
+        url = self.restore.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        if deleted_share_name is not None:
+            header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str')
+        if deleted_share_version is not None:
+            header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    restore.metadata = {'url': '/{shareName}'}
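A minimal usage sketch for the three operations above (illustrative only, not part of the generated sources): it assumes an already-configured async ShareOperations instance, here called `share_ops`, and every literal value is a placeholder.

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.models import (
        AccessPolicy,
        SignedIdentifier,
    )

    async def demo(share_ops):
        # Publish a stored access policy granting read access for a fixed window.
        policy = AccessPolicy(start="2020-08-19T00:00:00Z",
                              expiry="2020-08-20T00:00:00Z",
                              permission="r")
        await share_ops.set_access_policy(
            share_acl=[SignedIdentifier(id="read-policy-1", access_policy=policy)])

        # Approximate size of the data stored in the share, in bytes.
        stats = await share_ops.get_statistics()
        print(stats.share_usage_bytes)

        # Undelete a soft-deleted share; the name and version are placeholders.
        await share_ops.restore(deleted_share_name="myshare",
                                deleted_share_version="1234567890")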
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py
new file mode 100644
index 0000000..44ec5d1
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+try:
+    from ._models_py3 import AccessPolicy
+    from ._models_py3 import CopyFileSmbInfo
+    from ._models_py3 import CorsRule
+    from ._models_py3 import DirectoryItem
+    from ._models_py3 import FileHTTPHeaders
+    from ._models_py3 import FileItem
+    from ._models_py3 import FileProperty
+    from ._models_py3 import FilesAndDirectoriesListSegment
+    from ._models_py3 import HandleItem
+    from ._models_py3 import LeaseAccessConditions
+    from ._models_py3 import ListFilesAndDirectoriesSegmentResponse
+    from ._models_py3 import ListHandlesResponse
+    from ._models_py3 import ListSharesResponse
+    from ._models_py3 import Metrics
+    from ._models_py3 import Range
+    from ._models_py3 import RetentionPolicy
+    from ._models_py3 import ShareItem
+    from ._models_py3 import SharePermission
+    from ._models_py3 import ShareProperties
+    from ._models_py3 import ShareStats
+    from ._models_py3 import SignedIdentifier
+    from ._models_py3 import SourceModifiedAccessConditions
+    from ._models_py3 import StorageError, StorageErrorException
+    from ._models_py3 import StorageServiceProperties
+except (SyntaxError, ImportError):
+    from ._models import AccessPolicy
+    from ._models import CopyFileSmbInfo
+    from ._models import CorsRule
+    from ._models import DirectoryItem
+    from ._models import FileHTTPHeaders
+    from ._models import FileItem
+    from ._models import FileProperty
+    from ._models import FilesAndDirectoriesListSegment
+    from ._models import HandleItem
+    from ._models import LeaseAccessConditions
+    from ._models import ListFilesAndDirectoriesSegmentResponse
+    from ._models import ListHandlesResponse
+    from ._models import ListSharesResponse
+    from ._models import Metrics
+    from ._models import Range
+    from ._models import RetentionPolicy
+    from ._models import ShareItem
+    from ._models import SharePermission
+    from ._models import ShareProperties
+    from ._models import ShareStats
+    from ._models import SignedIdentifier
+    from ._models import SourceModifiedAccessConditions
+    from ._models import StorageError, StorageErrorException
+    from ._models import StorageServiceProperties
+from ._azure_file_storage_enums import (
+    CopyStatusType,
+    DeleteSnapshotsOptionType,
+    FileRangeWriteType,
+    LeaseDurationType,
+    LeaseStateType,
+    LeaseStatusType,
+    ListSharesIncludeType,
+    PermissionCopyModeType,
+    StorageErrorCode,
+)
+
+__all__ = [
+    'AccessPolicy',
+    'CopyFileSmbInfo',
+    'CorsRule',
+    'DirectoryItem',
+    'FileHTTPHeaders',
+    'FileItem',
+    'FileProperty',
+    'FilesAndDirectoriesListSegment',
+    'HandleItem',
+    'LeaseAccessConditions',
+    'ListFilesAndDirectoriesSegmentResponse',
+    'ListHandlesResponse',
+    'ListSharesResponse',
+    'Metrics',
+    'Range',
+    'RetentionPolicy',
+    'ShareItem',
+    'SharePermission',
+    'ShareProperties',
+    'ShareStats',
+    'SignedIdentifier',
+    'SourceModifiedAccessConditions',
+    'StorageError', 'StorageErrorException',
+    'StorageServiceProperties',
+    'StorageErrorCode',
+    'PermissionCopyModeType',
+    'DeleteSnapshotsOptionType',
+    'ListSharesIncludeType',
+    'CopyStatusType',
+    'LeaseDurationType',
+    'LeaseStateType',
+    'LeaseStatusType',
+    'FileRangeWriteType',
+]
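Given these exports, a hedged error-handling sketch (again illustrative, not patch content; `ops` stands in for any generated operations group that raises StorageErrorException):

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.models import (
        StorageErrorException,
    )

    async def get_stats_or_log(ops):
        try:
            return await ops.get_statistics()
        except StorageErrorException as exc:
            # exc.error is the deserialized StorageError model; its
            # `message` attribute may be None for an empty error body.
            print(exc.error.message)
            raise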
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py
new file mode 100644
index 0000000..66f39fb
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
+class StorageErrorCode(str, Enum):
+
+    account_already_exists = "AccountAlreadyExists"
+    account_being_created = "AccountBeingCreated"
+    account_is_disabled = "AccountIsDisabled"
+    authentication_failed = "AuthenticationFailed"
+    authorization_failure = "AuthorizationFailure"
+    condition_headers_not_supported = "ConditionHeadersNotSupported"
+    condition_not_met = "ConditionNotMet"
+    empty_metadata_key = "EmptyMetadataKey"
+    insufficient_account_permissions = "InsufficientAccountPermissions"
+    internal_error = "InternalError"
+    invalid_authentication_info = "InvalidAuthenticationInfo"
+    invalid_header_value = "InvalidHeaderValue"
+    invalid_http_verb = "InvalidHttpVerb"
+    invalid_input = "InvalidInput"
+    invalid_md5 = "InvalidMd5"
+    invalid_metadata = "InvalidMetadata"
+    invalid_query_parameter_value = "InvalidQueryParameterValue"
+    invalid_range = "InvalidRange"
+    invalid_resource_name = "InvalidResourceName"
+    invalid_uri = "InvalidUri"
+    invalid_xml_document = "InvalidXmlDocument"
+    invalid_xml_node_value = "InvalidXmlNodeValue"
+    md5_mismatch = "Md5Mismatch"
+    metadata_too_large = "MetadataTooLarge"
+    missing_content_length_header = "MissingContentLengthHeader"
+    missing_required_query_parameter = "MissingRequiredQueryParameter"
+    missing_required_header = "MissingRequiredHeader"
+    missing_required_xml_node = "MissingRequiredXmlNode"
+    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
+    operation_timed_out = "OperationTimedOut"
+    out_of_range_input = "OutOfRangeInput"
+    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
+    request_body_too_large = "RequestBodyTooLarge"
+    resource_type_mismatch = "ResourceTypeMismatch"
+    request_url_failed_to_parse = "RequestUrlFailedToParse"
+    resource_already_exists = "ResourceAlreadyExists"
+    resource_not_found = "ResourceNotFound"
+    server_busy = "ServerBusy"
+    unsupported_header = "UnsupportedHeader"
+    unsupported_xml_node = "UnsupportedXmlNode"
+    unsupported_query_parameter = "UnsupportedQueryParameter"
+    unsupported_http_verb = "UnsupportedHttpVerb"
+    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
+    client_cache_flush_delay = "ClientCacheFlushDelay"
+    delete_pending = "DeletePending"
+    directory_not_empty = "DirectoryNotEmpty"
+    file_lock_conflict = "FileLockConflict"
+    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
+    parent_not_found = "ParentNotFound"
+    read_only_attribute = "ReadOnlyAttribute"
+    share_already_exists = "ShareAlreadyExists"
+    share_being_deleted = "ShareBeingDeleted"
+    share_disabled = "ShareDisabled"
+    share_not_found = "ShareNotFound"
+    sharing_violation = "SharingViolation"
+    share_snapshot_in_progress = "ShareSnapshotInProgress"
+    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
+    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
+    share_has_snapshots = "ShareHasSnapshots"
+    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
+    authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch"
+    authorization_protocol_mismatch = "AuthorizationProtocolMismatch"
+    authorization_permission_mismatch = "AuthorizationPermissionMismatch"
+    authorization_service_mismatch = "AuthorizationServiceMismatch"
+    authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch"
+    feature_version_mismatch = "FeatureVersionMismatch"
+
+
+class PermissionCopyModeType(str, Enum):
+
+    source = "source"
+    override = "override"
+
+
+class DeleteSnapshotsOptionType(str, Enum):
+
+    include = "include"
+
+
+class ListSharesIncludeType(str, Enum):
+
+    snapshots = "snapshots"
+    metadata = "metadata"
+    deleted = "deleted"
+
+
+class CopyStatusType(str, Enum):
+
+    pending = "pending"
+    success = "success"
+    aborted = "aborted"
+    failed = "failed"
+
+
+class LeaseDurationType(str, Enum):
+
+    infinite = "infinite"
+    fixed = "fixed"
+
+
+class LeaseStateType(str, Enum):
+
+    available = "available"
+    leased = "leased"
+    expired = "expired"
+    breaking = "breaking"
+    broken = "broken"
+
+
+class LeaseStatusType(str, Enum):
+
+    locked = "locked"
+    unlocked = "unlocked"
+
+
+class FileRangeWriteType(str, Enum):
+
+    update = "update"
+    clear = "clear"
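One consequence of deriving these enums from (str, Enum), shown as a hedged illustration rather than patch content: members compare equal to their wire-format strings, and raw strings from the service round-trip back to members.

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.models import (
        StorageErrorCode,
    )

    # The str mixin makes each member interchangeable with its wire value.
    assert StorageErrorCode.share_not_found == "ShareNotFound"
    # A raw x-ms-error-code value maps back to the corresponding member.
    assert StorageErrorCode("ShareNotFound") is StorageErrorCode.share_not_found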
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py
new file mode 100644
index 0000000..f5cc1fa
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py
@@ -0,0 +1,896 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from azure.core.exceptions import HttpResponseError
+
+
+class AccessPolicy(Model):
+    """An Access policy.
+
+    :param start: The date-time the policy is active.
+    :type start: str
+    :param expiry: The date-time the policy expires.
+    :type expiry: str
+    :param permission: The permissions for the ACL policy.
+    :type permission: str
+    """
+
+    _attribute_map = {
+        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(AccessPolicy, self).__init__(**kwargs)
+        self.start = kwargs.get('start', None)
+        self.expiry = kwargs.get('expiry', None)
+        self.permission = kwargs.get('permission', None)
+
+
+class CopyFileSmbInfo(Model):
+    """Additional parameters for start_copy operation.
+
+    :param file_permission_copy_mode: Specifies the option to copy file
+     security descriptor from source file or to set it using the value which is
+     defined by the header value of x-ms-file-permission or
+     x-ms-file-permission-key. Possible values include: 'source', 'override'
+    :type file_permission_copy_mode: str or
+     ~azure.storage.fileshare.models.PermissionCopyModeType
+    :param ignore_read_only: Specifies the option to overwrite the target file
+     if it already exists and has read-only attribute set.
+    :type ignore_read_only: bool
+    :param file_attributes: Specifies either the option to copy file
+     attributes from a source file (source) to a target file or a list of
+     attributes to set on a target file.
+    :type file_attributes: str
+    :param file_creation_time: Specifies either the option to copy file
+     creation time from a source file (source) to a target file or a time value
+     in ISO 8601 format to set as creation time on a target file.
+    :type file_creation_time: str
+    :param file_last_write_time: Specifies either the option to copy file last
+     write time from a source file (source) to a target file or a time value in
+     ISO 8601 format to set as last write time on a target file.
+    :type file_last_write_time: str
+    :param set_archive_attribute: Specifies the option to set archive
+     attribute on a target file. True means archive attribute will be set on a
+     target file despite attribute overrides or a source file state.
+    :type set_archive_attribute: bool
+    """
+
+    _attribute_map = {
+        'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
+        'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
+        'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
+        'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
+        'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
+        'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(CopyFileSmbInfo, self).__init__(**kwargs)
+        self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None)
+        self.ignore_read_only = kwargs.get('ignore_read_only', None)
+        self.file_attributes = kwargs.get('file_attributes', None)
+        self.file_creation_time = kwargs.get('file_creation_time', None)
+        self.file_last_write_time = kwargs.get('file_last_write_time', None)
+        self.set_archive_attribute = kwargs.get('set_archive_attribute', None)
+
+
+class CorsRule(Model):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to
+     make a request against the storage service via CORS. The origin domain is
+     the domain from which the request originates. Note that the origin must be
+     an exact case-sensitive match with the origin that the user agent sends to
+     the service. You can also use the wildcard character '*' to allow all
+     origin domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that
+     the origin domain may use for a CORS request. (comma separated)
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin
+     domain may specify on the CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in
+     the response to the CORS request and exposed by the browser to the request
+     issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+     browser should cache the preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = kwargs.get('allowed_origins', None)
+        self.allowed_methods = kwargs.get('allowed_methods', None)
+        self.allowed_headers = kwargs.get('allowed_headers', None)
+        self.exposed_headers = kwargs.get('exposed_headers', None)
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
+
+
+class DirectoryItem(Model):
+    """A listed directory item.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    """
+
+    _validation = {
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+    }
+    _xml_map = {
+        'name': 'Directory'
+    }
+
+    def __init__(self, **kwargs):
+        super(DirectoryItem, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+
+
+class FileHTTPHeaders(Model):
+    """Additional parameters for a set of operations, such as: File_create,
+    File_set_http_headers.
+
+    :param file_content_type: Sets the MIME content type of the file. The
+     default type is 'application/octet-stream'.
+    :type file_content_type: str
+    :param file_content_encoding: Specifies which content encodings have been
+     applied to the file.
+    :type file_content_encoding: str
+    :param file_content_language: Specifies the natural languages used by this
+     resource.
+    :type file_content_language: str
+    :param file_cache_control: Sets the file's cache control. The File service
+     stores this value but does not use or modify it.
+    :type file_cache_control: str
+    :param file_content_md5: Sets the file's MD5 hash.
+    :type file_content_md5: bytearray
+    :param file_content_disposition: Sets the file's Content-Disposition
+     header.
+ :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, + 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, + 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, + 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, + 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, + 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = kwargs.get('file_content_type', None) + self.file_content_encoding = kwargs.get('file_content_encoding', None) + self.file_content_language = kwargs.get('file_content_language', None) + self.file_cache_control = kwargs.get('file_cache_control', None) + self.file_content_md5 = kwargs.get('file_content_md5', None) + self.file_content_disposition = kwargs.get('file_content_disposition', None) + + +class FileItem(Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.FileProperty + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, + } + _xml_map = { + 'name': 'File' + } + + def __init__(self, **kwargs): + super(FileItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.properties = kwargs.get('properties', None) + + +class FileProperty(Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value + may not be up-to-date since an SMB client may have modified the file + locally. The value of Content-Length may not reflect that fact until the + handle is closed or the op-lock is broken. To retrieve current property + values, call Get File Properties. + :type content_length: long + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(FileProperty, self).__init__(**kwargs) + self.content_length = kwargs.get('content_length', None) + + +class FilesAndDirectoriesListSegment(Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. 
+ :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__(self, **kwargs): + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = kwargs.get('directory_items', None) + self.file_items = kwargs.get('file_items', None) + + +class HandleItem(Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID + :type handle_id: str + :param path: Required. File or directory name including full path starting + from share root + :type path: str + :param file_id: Required. FileId uniquely identifies the file or + directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the + object. + :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file + handle was opened + :type session_id: str + :param client_ip: Required. Client IP that opened the handle + :type client_ip: str + :param open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :type open_time: datetime + :param last_reconnect_time: Time handle was last connected to (UTC) + :type last_reconnect_time: datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, + 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, + 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, + 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, + 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, + 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__(self, **kwargs): + super(HandleItem, self).__init__(**kwargs) + self.handle_id = kwargs.get('handle_id', None) + self.path = kwargs.get('path', None) + self.file_id = kwargs.get('file_id', None) + self.parent_id = kwargs.get('parent_id', None) + self.session_id = kwargs.get('session_id', None) + self.client_ip = kwargs.get('client_ip', None) + self.open_time = kwargs.get('open_time', None) + self.last_reconnect_time = kwargs.get('last_reconnect_time', None) + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. 
+ :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListFilesAndDirectoriesSegmentResponse(Model): + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param share_name: Required. + :type share_name: str + :param share_snapshot: + :type share_snapshot: str + :param directory_path: Required. + :type directory_path: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: + ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'share_name': {'required': True}, + 'directory_path': {'required': True}, + 'prefix': {'required': True}, + 'segment': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, + 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, + 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.share_name = kwargs.get('share_name', None) + self.share_snapshot = kwargs.get('share_snapshot', None) + self.directory_path = kwargs.get('directory_path', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs.get('segment', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListHandlesResponse(Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :param handle_list: + :type handle_list: list[~azure.storage.fileshare.models.HandleItem] + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListHandlesResponse, self).__init__(**kwargs) + self.handle_list = kwargs.get('handle_list', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListSharesResponse(Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param share_items: + :type share_items: list[~azure.storage.fileshare.models.ShareItem] + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListSharesResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.share_items = kwargs.get('share_items', None) + self.next_marker = kwargs.get('next_marker', None) + + +class Metrics(Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + File service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. 
+ :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs.get('enabled', None) + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class Range(Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. Start of the range. + :type start: long + :param end: Required. End of the range. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'Range' + } + + def __init__(self, **kwargs): + super(Range, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + + +class RetentionPolicy(Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled + for the File service. If false, metrics data is retained, and the user is + responsible for deleting it. + :type enabled: bool + :param days: Indicates the number of days that metrics data should be + retained. All data older than this value will be deleted. Metrics data is + deleted on a best-effort basis after the retention period expires. + :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'maximum': 365, 'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + self.days = kwargs.get('days', None) + + +class ShareItem(Model): + """A listed Azure Storage share item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param snapshot: + :type snapshot: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. 
+ :type properties: ~azure.storage.fileshare.models.ShareProperties + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Share' + } + + def __init__(self, **kwargs): + super(ShareItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.snapshot = kwargs.get('snapshot', None) + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) + self.properties = kwargs.get('properties', None) + self.metadata = kwargs.get('metadata', None) + + +class SharePermission(Model): + """A permission (a security descriptor) at the share level. + + All required parameters must be populated in order to send to Azure. + + :param permission: Required. The permission in the Security Descriptor + Definition Language (SDDL). + :type permission: str + """ + + _validation = { + 'permission': {'required': True}, + } + + _attribute_map = { + 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(SharePermission, self).__init__(**kwargs) + self.permission = kwargs.get('permission', None) + + +class ShareProperties(Model): + """Properties of a share. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. + :type etag: str + :param quota: Required. 
+ :type quota: int + :param provisioned_iops: + :type provisioned_iops: int + :param provisioned_ingress_mbps: + :type provisioned_ingress_mbps: int + :param provisioned_egress_mbps: + :type provisioned_egress_mbps: int + :param next_allowed_quota_downgrade_time: + :type next_allowed_quota_downgrade_time: datetime + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + 'quota': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, + 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, + 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, + 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, + 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareProperties, self).__init__(**kwargs) + self.last_modified = kwargs.get('last_modified', None) + self.etag = kwargs.get('etag', None) + self.quota = kwargs.get('quota', None) + self.provisioned_iops = kwargs.get('provisioned_iops', None) + self.provisioned_ingress_mbps = kwargs.get('provisioned_ingress_mbps', None) + self.provisioned_egress_mbps = kwargs.get('provisioned_egress_mbps', None) + self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + + +class ShareStats(Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :param share_usage_bytes: Required. The approximate size of the data + stored in bytes. Note that this value may not include all recently created + or recently resized files. + :type share_usage_bytes: int + """ + + _validation = { + 'share_usage_bytes': {'required': True}, + } + + _attribute_map = { + 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareStats, self).__init__(**kwargs) + self.share_usage_bytes = kwargs.get('share_usage_bytes', None) + + +class SignedIdentifier(Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique id. + :type id: str + :param access_policy: The access policy. 
+    :type access_policy: ~azure.storage.fileshare.models.AccessPolicy
+    """
+
+    _validation = {
+        'id': {'required': True},
+    }
+
+    _attribute_map = {
+        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
+        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(SignedIdentifier, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.access_policy = kwargs.get('access_policy', None)
+
+
+class SourceModifiedAccessConditions(Model):
+    """Additional parameters for upload_range_from_url operation.
+
+    :param source_if_match_crc64: Specify the crc64 value to operate only on
+     range with a matching crc64 checksum.
+    :type source_if_match_crc64: bytearray
+    :param source_if_none_match_crc64: Specify the crc64 value to operate only
+     on range without a matching crc64 checksum.
+    :type source_if_none_match_crc64: bytearray
+    """
+
+    _attribute_map = {
+        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
+        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+        self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None)
+        self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None)
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param message:
+    :type message: str
+    """
+
+    _attribute_map = {
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(StorageError, self).__init__(**kwargs)
+        self.message = kwargs.get('message', None)
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+    """Storage service properties.
+
+    :param hour_metrics: A summary of request statistics grouped by API in
+     hourly aggregates for files.
+    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
+    :param minute_metrics: A summary of request statistics grouped by API in
+     minute aggregates for files.
+    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
+    :param cors: The set of CORS rules.
+    :type cors: list[~azure.storage.fileshare.models.CorsRule]
+    """
+
+    _attribute_map = {
+        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(StorageServiceProperties, self).__init__(**kwargs)
+        self.hour_metrics = kwargs.get('hour_metrics', None)
+        self.minute_metrics = kwargs.get('minute_metrics', None)
+        self.cors = kwargs.get('cors', None)
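A note before the Python 3 variants that follow: unlike the _models classes above, which accept everything through **kwargs, the _models_py3 classes use keyword-only, annotated constructors, so required fields are explicit at the call site. A hedged sketch (all values are placeholders):

    from azure.multiapi.storagev2.fileshare.v2019_12_12._generated.models import CorsRule

    # On Python 3 the package re-exports the _models_py3 variant, whose
    # constructor requires every mandatory field by keyword.
    rule = CorsRule(
        allowed_origins="https://contoso.example",
        allowed_methods="GET,PUT",
        allowed_headers="x-ms-meta-*",
        exposed_headers="x-ms-meta-*",
        max_age_in_seconds=3600,
    )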
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py
new file mode 100644
index 0000000..0be5dca
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py
@@ -0,0 +1,896 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from azure.core.exceptions import HttpResponseError
+
+
+class AccessPolicy(Model):
+    """An Access policy.
+
+    :param start: The date-time the policy is active.
+    :type start: str
+    :param expiry: The date-time the policy expires.
+    :type expiry: str
+    :param permission: The permissions for the ACL policy.
+    :type permission: str
+    """
+
+    _attribute_map = {
+        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None:
+        super(AccessPolicy, self).__init__(**kwargs)
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
+
+class CopyFileSmbInfo(Model):
+    """Additional parameters for start_copy operation.
+
+    :param file_permission_copy_mode: Specifies the option to copy file
+     security descriptor from source file or to set it using the value which is
+     defined by the header value of x-ms-file-permission or
+     x-ms-file-permission-key. Possible values include: 'source', 'override'
+    :type file_permission_copy_mode: str or
+     ~azure.storage.fileshare.models.PermissionCopyModeType
+    :param ignore_read_only: Specifies the option to overwrite the target file
+     if it already exists and has read-only attribute set.
+    :type ignore_read_only: bool
+    :param file_attributes: Specifies either the option to copy file
+     attributes from a source file (source) to a target file or a list of
+     attributes to set on a target file.
+    :type file_attributes: str
+    :param file_creation_time: Specifies either the option to copy file
+     creation time from a source file (source) to a target file or a time value
+     in ISO 8601 format to set as creation time on a target file.
+    :type file_creation_time: str
+    :param file_last_write_time: Specifies either the option to copy file last
+     write time from a source file (source) to a target file or a time value in
+     ISO 8601 format to set as last write time on a target file.
+    :type file_last_write_time: str
+    :param set_archive_attribute: Specifies the option to set archive
+     attribute on a target file. True means archive attribute will be set on a
+     target file despite attribute overrides or a source file state.
+    :type set_archive_attribute: bool
+    """
+
+    _attribute_map = {
+        'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
+        'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
+        'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
+        'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
+        'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
+        'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None:
+        super(CopyFileSmbInfo, self).__init__(**kwargs)
+        self.file_permission_copy_mode = file_permission_copy_mode
+        self.ignore_read_only = ignore_read_only
+        self.file_attributes = file_attributes
+        self.file_creation_time = file_creation_time
+        self.file_last_write_time = file_last_write_time
+        self.set_archive_attribute = set_archive_attribute
+
+
+class CorsRule(Model):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to
+     make a request against the storage service via CORS. The origin domain is
+     the domain from which the request originates. Note that the origin must be
+     an exact case-sensitive match with the origin that the user agent sends to
+     the service. You can also use the wildcard character '*' to allow all
+     origin domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that
+     the origin domain may use for a CORS request. (comma separated)
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin
+     domain may specify on the CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in
+     the response to the CORS request and exposed by the browser to the request
+     issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+     browser should cache the preflight OPTIONS request.
+ :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, + } + _xml_map = { + } + + def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = allowed_origins + self.allowed_methods = allowed_methods + self.allowed_headers = allowed_headers + self.exposed_headers = exposed_headers + self.max_age_in_seconds = max_age_in_seconds + + +class DirectoryItem(Model): + """A listed directory item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + } + _xml_map = { + 'name': 'Directory' + } + + def __init__(self, *, name: str, **kwargs) -> None: + super(DirectoryItem, self).__init__(**kwargs) + self.name = name + + +class FileHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: File_create, + File_set_http_headers. + + :param file_content_type: Sets the MIME content type of the file. The + default type is 'application/octet-stream'. + :type file_content_type: str + :param file_content_encoding: Specifies which content encodings have been + applied to the file. + :type file_content_encoding: str + :param file_content_language: Specifies the natural languages used by this + resource. + :type file_content_language: str + :param file_cache_control: Sets the file's cache control. The File service + stores this value but does not use or modify it. + :type file_cache_control: str + :param file_content_md5: Sets the file's MD5 hash. + :type file_content_md5: bytearray + :param file_content_disposition: Sets the file's Content-Disposition + header. 
+ :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, + 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, + 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, + 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, + 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, + 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None: + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = file_content_type + self.file_content_encoding = file_content_encoding + self.file_content_language = file_content_language + self.file_cache_control = file_cache_control + self.file_content_md5 = file_content_md5 + self.file_content_disposition = file_content_disposition + + +class FileItem(Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.FileProperty + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, + } + _xml_map = { + 'name': 'File' + } + + def __init__(self, *, name: str, properties, **kwargs) -> None: + super(FileItem, self).__init__(**kwargs) + self.name = name + self.properties = properties + + +class FileProperty(Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value + may not be up-to-date since an SMB client may have modified the file + locally. The value of Content-Length may not reflect that fact until the + handle is closed or the op-lock is broken. To retrieve current property + values, call Get File Properties. + :type content_length: long + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + } + _xml_map = { + } + + def __init__(self, *, content_length: int, **kwargs) -> None: + super(FileProperty, self).__init__(**kwargs) + self.content_length = content_length + + +class FilesAndDirectoriesListSegment(Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. 
+ :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__(self, *, directory_items, file_items, **kwargs) -> None: + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = directory_items + self.file_items = file_items + + +class HandleItem(Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID + :type handle_id: str + :param path: Required. File or directory name including full path starting + from share root + :type path: str + :param file_id: Required. FileId uniquely identifies the file or + directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the + object. + :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file + handle was opened + :type session_id: str + :param client_ip: Required. Client IP that opened the handle + :type client_ip: str + :param open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :type open_time: datetime + :param last_reconnect_time: Time handle was last connected to (UTC) + :type last_reconnect_time: datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, + 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, + 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, + 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, + 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, + 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None: + super(HandleItem, self).__init__(**kwargs) + self.handle_id = handle_id + self.path = path + self.file_id = file_id + self.parent_id = parent_id + self.session_id = session_id + self.client_ip = client_ip + self.open_time = open_time + self.last_reconnect_time = last_reconnect_time + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. 
+    """
+
+    _attribute_map = {
+        'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, lease_id: str=None, **kwargs) -> None:
+        super(LeaseAccessConditions, self).__init__(**kwargs)
+        self.lease_id = lease_id
+
+
+class ListFilesAndDirectoriesSegmentResponse(Model):
+    """An enumeration of directories and files.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param service_endpoint: Required.
+    :type service_endpoint: str
+    :param share_name: Required.
+    :type share_name: str
+    :param share_snapshot:
+    :type share_snapshot: str
+    :param directory_path: Required.
+    :type directory_path: str
+    :param prefix: Required.
+    :type prefix: str
+    :param marker:
+    :type marker: str
+    :param max_results:
+    :type max_results: int
+    :param segment: Required.
+    :type segment:
+     ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
+    :param next_marker: Required.
+    :type next_marker: str
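+
+    A sketch of walking one deserialized response (illustrative only;
+    ``response`` stands for an already deserialized instance of this model)::
+
+        for item in response.segment.directory_items:
+            print("dir: ", item.name)
+        for item in response.segment.file_items:
+            print("file:", item.name, item.properties.content_length)
+        if response.next_marker:
+            pass  # pass next_marker as `marker` on the next listing call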
+    """
+
+    _validation = {
+        'service_endpoint': {'required': True},
+        'share_name': {'required': True},
+        'directory_path': {'required': True},
+        'prefix': {'required': True},
+        'segment': {'required': True},
+        'next_marker': {'required': True},
+    }
+
+    _attribute_map = {
+        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+        'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}},
+        'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}},
+        'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}},
+        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+        'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}},
+        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+    }
+    _xml_map = {
+        'name': 'EnumerationResults'
+    }
+
+    def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None:
+        super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.share_name = share_name
+        self.share_snapshot = share_snapshot
+        self.directory_path = directory_path
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.segment = segment
+        self.next_marker = next_marker
+
+
+class ListHandlesResponse(Model):
+    """An enumeration of handles.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param handle_list:
+    :type handle_list: list[~azure.storage.fileshare.models.HandleItem]
+    :param next_marker: Required.
+    :type next_marker: str
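+
+    A pagination sketch (illustrative; ``ops`` stands for a configured
+    operations object exposing ``list_handles``, which is an assumption)::
+
+        marker = None
+        while True:
+            page = ops.list_handles(marker=marker)
+            for handle in page.handle_list or []:
+                print(handle.handle_id, handle.path, handle.client_ip)
+            if not page.next_marker:
+                break
+            marker = page.next_marker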
+    """
+
+    _validation = {
+        'next_marker': {'required': True},
+    }
+
+    _attribute_map = {
+        'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}},
+        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+    }
+    _xml_map = {
+        'name': 'EnumerationResults'
+    }
+
+    def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None:
+        super(ListHandlesResponse, self).__init__(**kwargs)
+        self.handle_list = handle_list
+        self.next_marker = next_marker
+
+
+class ListSharesResponse(Model):
+    """An enumeration of shares.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param service_endpoint: Required.
+    :type service_endpoint: str
+    :param prefix:
+    :type prefix: str
+    :param marker:
+    :type marker: str
+    :param max_results:
+    :type max_results: int
+    :param share_items:
+    :type share_items: list[~azure.storage.fileshare.models.ShareItem]
+    :param next_marker: Required.
+    :type next_marker: str
+    """
+
+    _validation = {
+        'service_endpoint': {'required': True},
+        'next_marker': {'required': True},
+    }
+
+    _attribute_map = {
+        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+        'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}},
+        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+    }
+    _xml_map = {
+        'name': 'EnumerationResults'
+    }
+
+    def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None:
+        super(ListSharesResponse, self).__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.share_items = share_items
+        self.next_marker = next_marker
+
+
+class Metrics(Model):
+    """Storage Analytics metrics for file service.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param version: Required. The version of Storage Analytics to configure.
+    :type version: str
+    :param enabled: Required. Indicates whether metrics are enabled for the
+     File service.
+    :type enabled: bool
+    :param include_apis: Indicates whether metrics should generate summary
+     statistics for called API operations.
+    :type include_apis: bool
+    :param retention_policy:
+    :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
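+
+    A configuration sketch (illustrative only; the values are made up)::
+
+        metrics = Metrics(
+            version="1.0",
+            enabled=True,
+            include_apis=True,
+            retention_policy=RetentionPolicy(enabled=True, days=7),
+        )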
+    """
+
+    _validation = {
+        'version': {'required': True},
+        'enabled': {'required': True},
+    }
+
+    _attribute_map = {
+        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
+        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
+        super(Metrics, self).__init__(**kwargs)
+        self.version = version
+        self.enabled = enabled
+        self.include_apis = include_apis
+        self.retention_policy = retention_policy
+
+
+class Range(Model):
+    """An Azure Storage file range.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param start: Required. Start of the range.
+    :type start: long
+    :param end: Required. End of the range.
+    :type end: long
+    """
+
+    _validation = {
+        'start': {'required': True},
+        'end': {'required': True},
+    }
+
+    _attribute_map = {
+        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+    }
+    _xml_map = {
+        'name': 'Range'
+    }
+
+    def __init__(self, *, start: int, end: int, **kwargs) -> None:
+        super(Range, self).__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class RetentionPolicy(Model):
+    """The retention policy.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param enabled: Required. Indicates whether a retention policy is enabled
+     for the File service. If false, metrics data is retained, and the user is
+     responsible for deleting it.
+    :type enabled: bool
+    :param days: Indicates the number of days that metrics data should be
+     retained. All data older than this value will be deleted. Metrics data is
+     deleted on a best-effort basis after the retention period expires.
+    :type days: int
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+        'days': {'maximum': 365, 'minimum': 1},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
+        super(RetentionPolicy, self).__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+
+
+class ShareItem(Model):
+    """A listed Azure Storage share item.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    :param snapshot:
+    :type snapshot: str
+    :param deleted:
+    :type deleted: bool
+    :param version:
+    :type version: str
+    :param properties: Required.
+    :type properties: ~azure.storage.fileshare.models.ShareProperties
+    :param metadata:
+    :type metadata: dict[str, str]
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'properties': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
+        'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+        'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}},
+        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
+    }
+    _xml_map = {
+        'name': 'Share'
+    }
+
+    def __init__(self, *, name: str, properties, snapshot: str=None, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None:
+        super(ShareItem, self).__init__(**kwargs)
+        self.name = name
+        self.snapshot = snapshot
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class SharePermission(Model):
+    """A permission (a security descriptor) at the share level.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param permission: Required. The permission in the Security Descriptor
+     Definition Language (SDDL).
+    :type permission: str
+    """
+
+    _validation = {
+        'permission': {'required': True},
+    }
+
+    _attribute_map = {
+        'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, permission: str, **kwargs) -> None:
+        super(SharePermission, self).__init__(**kwargs)
+        self.permission = permission
+
+
+class ShareProperties(Model):
+    """Properties of a share.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param last_modified: Required.
+    :type last_modified: datetime
+    :param etag: Required.
+    :type etag: str
+    :param quota: Required.
+    :type quota: int
+    :param provisioned_iops:
+    :type provisioned_iops: int
+    :param provisioned_ingress_mbps:
+    :type provisioned_ingress_mbps: int
+    :param provisioned_egress_mbps:
+    :type provisioned_egress_mbps: int
+    :param next_allowed_quota_downgrade_time:
+    :type next_allowed_quota_downgrade_time: datetime
+    :param deleted_time:
+    :type deleted_time: datetime
+    :param remaining_retention_days:
+    :type remaining_retention_days: int
+    """
+
+    _validation = {
+        'last_modified': {'required': True},
+        'etag': {'required': True},
+        'quota': {'required': True},
+    }
+
+    _attribute_map = {
+        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
+        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
+        'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}},
+        'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}},
+        'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}},
+        'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}},
+        'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}},
+        'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
+        'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None:
+        super(ShareProperties, self).__init__(**kwargs)
+        self.last_modified = last_modified
+        self.etag = etag
+        self.quota = quota
+        self.provisioned_iops = provisioned_iops
+        self.provisioned_ingress_mbps = provisioned_ingress_mbps
+        self.provisioned_egress_mbps = provisioned_egress_mbps
+        self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+
+
+class ShareStats(Model):
+    """Stats for the share.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param share_usage_bytes: Required. The approximate size of the data
+     stored in bytes. Note that this value may not include all recently created
+     or recently resized files.
+    :type share_usage_bytes: int
+    """
+
+    _validation = {
+        'share_usage_bytes': {'required': True},
+    }
+
+    _attribute_map = {
+        'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, share_usage_bytes: int, **kwargs) -> None:
+        super(ShareStats, self).__init__(**kwargs)
+        self.share_usage_bytes = share_usage_bytes
+
+
+class SignedIdentifier(Model):
+    """Signed identifier.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. A unique id.
+    :type id: str
+    :param access_policy: The access policy.
+    :type access_policy: ~azure.storage.fileshare.models.AccessPolicy
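+
+    A stored-access-policy sketch (illustrative only; it assumes the
+    generated ``AccessPolicy`` model accepts a ``permission`` keyword)::
+
+        identifier = SignedIdentifier(
+            id="read-only-policy",
+            access_policy=AccessPolicy(permission="r"),
+        )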
+    """
+
+    _validation = {
+        'id': {'required': True},
+    }
+
+    _attribute_map = {
+        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
+        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
+        super(SignedIdentifier, self).__init__(**kwargs)
+        self.id = id
+        self.access_policy = access_policy
+
+
+class SourceModifiedAccessConditions(Model):
+    """Additional parameters for upload_range_from_url operation.
+
+    :param source_if_match_crc64: Specify the crc64 value to operate only on
+     range with a matching crc64 checksum.
+    :type source_if_match_crc64: bytearray
+    :param source_if_none_match_crc64: Specify the crc64 value to operate only
+     on range without a matching crc64 checksum.
+    :type source_if_none_match_crc64: bytearray
+    """
+
+    _attribute_map = {
+        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
+        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None:
+        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+        self.source_if_match_crc64 = source_if_match_crc64
+        self.source_if_none_match_crc64 = source_if_none_match_crc64
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param message:
+    :type message: str
+    """
+
+    _attribute_map = {
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, message: str=None, **kwargs) -> None:
+        super(StorageError, self).__init__(**kwargs)
+        self.message = message
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
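+
+    A handling sketch (illustrative only; ``directory_ops`` stands for any
+    operations object from this package)::
+
+        try:
+            directory_ops.get_properties()
+        except StorageErrorException as exc:
+            # exc.error is the deserialized StorageError model
+            print(exc.error.message)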
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+    """Storage service properties.
+
+    :param hour_metrics: A summary of request statistics grouped by API in
+     hourly aggregates for files.
+    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
+    :param minute_metrics: A summary of request statistics grouped by API in
+     minute aggregates for files.
+    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
+    :param cors: The set of CORS rules.
+    :type cors: list[~azure.storage.fileshare.models.CorsRule]
+    """
+
+    _attribute_map = {
+        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None:
+        super(StorageServiceProperties, self).__init__(**kwargs)
+        self.hour_metrics = hour_metrics
+        self.minute_metrics = minute_metrics
+        self.cors = cors
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py
new file mode 100644
index 0000000..65680c9
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py
@@ -0,0 +1,22 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._share_operations import ShareOperations
+from ._directory_operations import DirectoryOperations
+from ._file_operations import FileOperations
+
+__all__ = [
+    'ServiceOperations',
+    'ShareOperations',
+    'DirectoryOperations',
+    'FileOperations',
+]
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py
new file mode 100644
index 0000000..c1afd8e
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py
@@ -0,0 +1,672 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class DirectoryOperations(object):
+    """DirectoryOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: Constant value: "directory".
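+
+    A wiring sketch (illustrative only; in practice the generated
+    ``AzureFileStorage`` client constructs this class and attaches it as
+    ``client.directory`` -- the constructor arguments shown are assumptions)::
+
+        ops = DirectoryOperations(client, config, serializer, deserializer)
+        ops.create()          # PUT {url}?restype=directory
+        ops.get_properties()  # GET {url}?restype=directory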
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "directory" + + def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): + """Creates a new directory under the specified share or parent directory. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.create.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if file_permission is not None:
+            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
+        if file_permission_key is not None:
+            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
+        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
+        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
+        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
+                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
+                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
+                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
+                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
+                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
+                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    create.metadata = {'url': '/{shareName}/{directory}'}
+
+    def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs):
+        """Returns all system properties for the specified directory, and can also
+        be used to check the existence of a directory. The data returned does
+        not include the files in the directory or any subdirectories.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.get_properties.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
+                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
+                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
+                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
+                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
+                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
+                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    get_properties.metadata = {'url': '/{shareName}/{directory}'}
+
+    def delete(self, timeout=None, cls=None, **kwargs):
+        """Removes the specified empty directory. Note that the directory must be
+        empty before it can be deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.delete.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.delete(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    delete.metadata = {'url': '/{shareName}/{directory}'}
+
+    def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs):
+        """Sets properties on the directory.
+
+        :param file_attributes: If specified, the provided file attributes
+         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+         directory. ‘None’ can also be specified as default.
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory.
+         Default value: Now.
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory.
+         Default value: Now.
+        :type file_last_write_time: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param file_permission: If specified, the permission (security
+         descriptor) shall be set for the directory/file. This header can be
+         used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value:
+         Inherit. If SDDL is specified as input, it must have owner, group and
+         dacl. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param file_permission_key: Key of the permission to be set for the
+         directory/file. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission_key: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "properties"
+
+        # Construct URL
+        url = self.set_properties.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if file_permission is not None:
+            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
+        if file_permission_key is not None:
+            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
+        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
+        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
+        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
+                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
+                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
+                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
+                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
+                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
+                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_properties.metadata = {'url': '/{shareName}/{directory}'}
+
+    def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs):
+        """Updates user-defined metadata for the specified directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "metadata"
+
+        # Construct URL
+        url = self.set_metadata.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_metadata.metadata = {'url': '/{shareName}/{directory}'}
+
+    def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs):
+        """Returns a list of files or directories under the specified share or
+        directory. It lists the contents only for a single level of the
+        directory hierarchy.
+
+        :param prefix: Filters the results to return only entries whose name
+         begins with the specified prefix.
+        :type prefix: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5,000, the server will return up to 5,000 items.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ListFilesAndDirectoriesSegmentResponse or the result of
+         cls(response)
+        :rtype:
+         ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse
+        :raises:
+         :class:`StorageErrorException`
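+
+        A pagination sketch (illustrative only; ``directory_ops`` stands for
+        a configured instance of this class)::
+
+            marker = None
+            while True:
+                page = directory_ops.list_files_and_directories_segment(
+                    prefix="logs", maxresults=1000, marker=marker)
+                for f in page.segment.file_items:
+                    print(f.name, f.properties.content_length)
+                if not page.next_marker:
+                    break
+                marker = page.next_marker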
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "list"
+
+        # Construct URL
+        url = self.list_files_and_directories_segment.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if prefix is not None:
+            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response)
+            header_dict = {
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'}
+
+    def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
+        """Lists handles for the directory.
+
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5,000, the server will return up to 5,000 items.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the
+         directory specified in the URI, its files, its subdirectories and
+         their files.
+        :type recursive: bool
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "listhandles"
+
+        # Construct URL
+        url = self.list_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        if recursive is not None:
+            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ListHandlesResponse', response)
+            header_dict = {
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_handles.metadata = {'url': '/{shareName}/{directory}'}
+
+    def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
+        """Closes all handles open for the given directory.
+
+        :param handle_id: Specifies handle ID opened on the file or directory
+         to be closed. Asterisk (‘*’) is a wildcard that specifies all handles.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the
+         directory specified in the URI, its files, its subdirectories and
+         their files.
+        :type recursive: bool
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "forceclosehandles"
+
+        # Construct URL
+        url = self.force_close_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
+        if recursive is not None:
+            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
+                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
+                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    force_close_handles.metadata = {'url': '/{shareName}/{directory}'}
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py
new file mode 100644
index 0000000..05636f7
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py
@@ -0,0 +1,1665 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class FileOperations(object):
+    """FileOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file".
+    :ivar x_ms_copy_action: Constant value: "abort".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.x_ms_type = "file"
+        self.x_ms_copy_action = "abort"
+
+    def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs):
+        """Creates a new file or replaces a file. Note that it only initializes
+        the file with no content.
+
+        :param file_content_length: Specifies the maximum size for the file,
+         up to 1 TB.
+        :type file_content_length: long
+        :param file_attributes: If specified, the provided file attributes
+         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+         directory. ‘None’ can also be specified as default.
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory.
+         Default value: Now.
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory.
+         Default value: Now.
+        :type file_last_write_time: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param file_permission: If specified, the permission (security
+         descriptor) shall be set for the directory/file. This header can be
+         used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value:
+         Inherit. If SDDL is specified as input, it must have owner, group and
+         dacl. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param file_permission_key: Key of the permission to be set for the
+         directory/file. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission_key: str
+        :param file_http_headers: Additional parameters for the operation
+        :type file_http_headers:
+         ~azure.storage.fileshare.models.FileHTTPHeaders
+        :param lease_access_conditions: Additional parameters for the
+         operation
+        :type lease_access_conditions:
+         ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
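+
+        A call sketch (illustrative only; it reserves 1 KiB for the file and
+        sets one HTTP header -- ``file_ops`` and the values are assumptions)::
+
+            file_ops.create(
+                file_content_length=1024,
+                file_http_headers=FileHTTPHeaders(file_content_type="text/plain"),
+            )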
For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = 
self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, cls=None, **kwargs): + """Reads or downloads a file from the system, including its metadata and + properties. + + :param timeout: The timeout parameter is expressed in seconds. 
For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and + specified together with the Range header, the service returns the MD5 + hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns all user-defined metadata, standard HTTP properties, and system + properties for the file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
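+        An illustrative call sketch (assumes ``client`` is an
+        authenticated ``AzureFileStorage`` instance built from this
+        package; the timeout value is a placeholder)::
+
+            client.file.get_properties(timeout=30)
+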
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def delete(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date':
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): + """Sets HTTP headers on the file. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If + the specified byte value is less than the current size of the file, + then all ranges above the specified byte value are cleared. + :type file_content_length: long + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
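+        An illustrative sketch (``client`` is assumed to be an
+        authenticated ``AzureFileStorage`` instance; this call only
+        resizes the file to 2048 bytes)::
+
+            client.file.set_http_headers(file_content_length=2048)
+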
+ :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
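+        An illustrative sketch (``client`` assumed as an authenticated
+        ``AzureFileStorage`` instance; the GUID is generated locally)::
+
+            import uuid
+            client.file.acquire_lease(
+                duration=-1, proposed_lease_id=str(uuid.uuid4()))
+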
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def release_lease(self, lease_id, timeout=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
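+        An illustrative sketch (``client`` assumed as an authenticated
+        ``AzureFileStorage`` instance; ``lease_id`` is the ID returned
+        when the lease was acquired)::
+
+            client.file.release_lease(lease_id=lease_id)
+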
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
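+        An illustrative sketch (``client`` assumed as an authenticated
+        ``AzureFileStorage`` instance; ``current_lease_id`` is the ID of
+        the existing lease)::
+
+            import uuid
+            client.file.change_lease(
+                lease_id=current_lease_id,
+                proposed_lease_id=str(uuid.uuid4()))
+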
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
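+        An illustrative sketch (``client`` assumed as an authenticated
+        ``AzureFileStorage`` instance; no lease ID is required to break
+        a lease)::
+
+            client.file.break_lease()
+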
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, cls=None, **kwargs): + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the + start and end of the range must be specified. For an update operation, + the range can be up to 4 MB in size. For a clear operation, the range + can be up to the value of the file's full size. The File service + accepts only a single byte range for the Range and 'x-ms-range' + headers, and the byte range must be specified in the following format: + bytes=startByte-endByte. 
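+        For example, writing the first 512 bytes (an illustrative
+        sketch; ``client`` is assumed to be an authenticated
+        ``AzureFileStorage`` instance and ``data`` a 512-byte payload)::
+
+            client.file.upload_range(
+                range='bytes=0-511', content_length=512,
+                optionalbody=iter([data]))
+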
+ :type range: str + :param file_range_write: Specify one of the following options: - + Update: Writes the bytes specified by the request body into the + specified range. The Range and Content-Length headers must match to + perform the update. - Clear: Clears the specified range and releases + the space used in storage for that range. To clear a range, set the + Content-Length header to zero, and set the Range header to a value + that indicates the range to clear, up to maximum file size. Possible + values include: 'update', 'clear' + :type file_range_write: str or + ~azure.storage.fileshare.models.FileRangeWriteType + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param optionalbody: Initial data. + :type optionalbody: Generator + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param content_md5: An MD5 hash of the content. This hash is used to + verify the integrity of the data during transport. When the + Content-MD5 header is specified, the File service compares the hash of + the content that has arrived with the header value that was sent. If + the two hashes do not match, the operation will fail with error code + 400 (Bad Request). + :type content_md5: bytearray + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + """Upload a range of bytes to a file where the contents are read from a + URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the + range of bytes that must be read from the copy source. 
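+        An illustrative sketch (``client`` assumed as an authenticated
+        ``AzureFileStorage`` instance; ``source_url`` is a placeholder
+        that must carry its own authorization, such as a SAS;
+        content_length is 0 because no request body is sent)::
+
+            client.file.upload_range_from_url(
+                range='bytes=0-511', copy_source=source_url,
+                content_length=0, source_range='bytes=0-511')
+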
+ :type source_content_crc64: bytearray + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + source_if_none_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') + if source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, + inclusively. + :type range: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.Range] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "rangelist" + + # Construct URL + url = self.get_range_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[Range]', response) 
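+                # '[Range]' deserializes the XML body into a list of
+                # models.Range instances; each Range carries inclusive
+                # start/end byte offsets (e.g. the first 512 bytes are
+                # start=0, end=511).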
+            header_dict = {
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
+
+    def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, cls=None, **kwargs):
+        """Copies a blob or file to a destination file within the storage account.
+
+        :param copy_source: Specifies the URL of the source file or blob, up
+         to 2 KB in length. To copy a file to another file within the same
+         storage account, you may use Shared Key to authenticate the source
+         file. If you are copying a file from another storage account, or if
+         you are copying a blob from the same storage account or another
+         storage account, then you must authenticate the source file or blob
+         using a shared access signature. If the source is a public blob, no
+         authentication is required to perform the copy operation. A file in a
+         share snapshot can also be specified as a copy source.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param file_permission: If specified, the permission (security
+         descriptor) shall be set for the directory/file. This header can be
+         used if the permission size is <= 8KB, else the
+         x-ms-file-permission-key header shall be used. Default value:
+         Inherit. If SDDL is specified as input, it must have owner, group
+         and dacl. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param file_permission_key: Key of the permission to be set for the
+         directory/file. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified.
+ :type file_permission_key: str + :param copy_file_smb_info: Additional parameters for the operation + :type copy_file_smb_info: + ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_permission_copy_mode = None + if copy_file_smb_info is not None: + file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + ignore_read_only = None + if copy_file_smb_info is not None: + ignore_read_only = copy_file_smb_info.ignore_read_only + file_attributes = None + if copy_file_smb_info is not None: + file_attributes = copy_file_smb_info.file_attributes + file_creation_time = None + if copy_file_smb_info is not None: + file_creation_time = copy_file_smb_info.file_creation_time + file_last_write_time = None + if copy_file_smb_info is not None: + file_last_write_time = copy_file_smb_info.file_last_write_time + set_archive_attribute = None + if copy_file_smb_info is not None: + set_archive_attribute = copy_file_smb_info.set_archive_attribute + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') + if ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') + if file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + if file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + if file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Aborts a pending Copy File operation, and leaves a destination file + with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "copy" + + # Construct URL + url = self.abort_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs): + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. 
+        :type sharesnapshot: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "listhandles"
+
+        # Construct URL
+        url = self.list_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ListHandlesResponse', response)
+            header_dict = {
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
+
+    def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs):
+        """Closes all handles open for the given file.
+
+        :param handle_id: Specifies handle ID opened on the file or directory
+         to be closed. Asterisk ('*') is a wildcard that specifies all
+         handles.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+ :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py new file mode 100644 index 0000000..cd43e83 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
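A minimal usage sketch for the file operations above (illustration only,
not part of the patch). It assumes `file_ops` is the FileOperations
instance that the generated AzureFileStorage client wires up with its
pipeline, serializer and deserializer; the source URL and SAS token are
hypothetical.

    # List the valid ranges of the file (returns a list of models.Range
    # when no `cls` callback is supplied).
    ranges = file_ops.get_range_list(timeout=30)
    occupied = sum(r.end - r.start + 1 for r in ranges)

    # Start a server-side copy; `cls=lambda _, __, h: h` makes the call
    # return the raw response headers, which carry the copy id and status.
    headers = file_ops.start_copy(
        copy_source='https://account.file.core.windows.net/src/f.txt?<sas>',
        cls=lambda _, __, h: h)
    if headers['x-ms-copy-status'] == 'pending':
        file_ops.abort_copy(headers['x-ms-copy-id'])

    # Enumerate open SMB handles, then force-close all of them ('*').
    handles = file_ops.list_handles(maxresults=100)
    file_ops.force_close_handles(handle_id='*')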
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: Constant value: "service".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.restype = "service"
+
+    def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs):
+        """Sets properties for a storage account's File service endpoint,
+        including properties for Storage Analytics metrics and CORS
+        (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties.
+        :type storage_service_properties:
+         ~azure.storage.fileshare.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "properties"
+
+        # Construct URL
+        url = self.set_properties.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct body
+        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters, body_content)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_properties.metadata = {'url': '/'}
+
+    def get_properties(self, timeout=None, cls=None, **kwargs):
+        """Gets the properties of a
storage account's File service, including + properties for Storage Analytics metrics and CORS (Cross-Origin + Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs): + """The List Shares Segment operation returns a list of the shares and + share snapshots under the specified account. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. 
+ :type include: list[str or + ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_shares_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListSharesResponse', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py new file mode 100644 index 0000000..4b53be8 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py @@ -0,0 +1,825 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
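A similar sketch for the ServiceOperations defined above (illustration
only, not part of the patch); `service_ops` stands in for the operations
object the generated AzureFileStorage client attaches, and the property
tweak is arbitrary.

    # Round-trip the account's File service properties: fetch, tweak, set.
    props = service_ops.get_properties()
    props.hour_metrics.enabled = False
    service_ops.set_properties(props)

    # Page through all shares using the opaque continuation marker.
    marker = None
    while True:
        page = service_ops.list_shares_segment(
            marker=marker, maxresults=100, include=['metadata', 'snapshots'])
        for share in page.share_items:
            print(share.name, share.properties.quota)
        marker = page.next_marker
        if not marker:
            break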
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ShareOperations(object):
+    """ShareOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: Constant value: "share".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.restype = "share"
+
+    def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs):
+        """Creates a new share under the specified account. If a share with
+        the same name already exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param quota: Specifies the maximum size of the share, in gigabytes.
+        :type quota: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.create.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        if quota is not None:
+            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
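+    # Each operation's route template is attached to the function object via
+    # `<operation>.metadata = {'url': ...}`; the method bodies read it back
+    # (e.g. `url = self.create.metadata['url']`) before formatting in the
+    # account URL.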
create.metadata = {'url': '/{shareName}'} + + def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): + """Returns all user-defined metadata and system properties for the + specified share or share snapshot. The data returned does not include + the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), + 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), + 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), + 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), + 'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}'} + + def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, cls=None, **kwargs): + """Operation marks the specified share or share 
snapshot for deletion. The + share or share snapshot and any files contained within it are later + deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the + base share and all of its snapshots. Possible values include: + 'include' + :type delete_snapshots: str or + ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}'} + + def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs): + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_snapshot.metadata = {'url': '/{shareName}'} + + def create_permission(self, share_permission, timeout=None, cls=None, **kwargs): + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the + share level. + :type share_permission: + ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.create_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_permission.metadata = {'url': '/{shareName}'} + + def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs): + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the + directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.get_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SharePermission', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} + + def set_quota(self, timeout=None, quota=None, cls=None, **kwargs): + """Sets quota for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. 
+ :type quota: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_quota.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_quota.metadata = {'url': '/{shareName}'} + + def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}'} + + def get_access_policy(self, timeout=None, cls=None, **kwargs): + """Returns information about stored access policies specified on the + share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} + + def set_access_policy(self, share_acl=None, timeout=None, cls=None, **kwargs): + """Sets a stored access policy for use with shared access signatures. + + :param share_acl: The ACL for the share. + :type share_acl: + list[~azure.storage.fileshare.models.SignedIdentifier] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "acl" + + # Construct URL + url = self.set_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{shareName}'} + + def get_statistics(self, timeout=None, cls=None, **kwargs): + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ShareStats or the result of cls(response)
+ :rtype: ~azure.storage.fileshare.models.ShareStats
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "stats"
+
+ # Construct URL
+ url = self.get_statistics.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ShareStats', response)
+ header_dict = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_statistics.metadata = {'url': '/{shareName}'}
+
+ def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, cls=None, **kwargs):
+ """Restores a previously deleted Share.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param deleted_share_name: Specifies the name of the
+ previously-deleted share.
+ :type deleted_share_name: str
+ :param deleted_share_version: Specifies the version of the
+ previously-deleted share.
+ :type deleted_share_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_share_name is not None: + header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') + if deleted_share_version is not None: + header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py new file mode 100644 index 0000000..be04589 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+VERSION = "2019-12-12"
+
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py
new file mode 100644
index 0000000..f67264a
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py
@@ -0,0 +1,170 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import uuid
+
+from typing import ( # pylint: disable=unused-import
+ Optional, Any, TypeVar, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+
+from ._shared.response_handlers import return_response_headers, process_storage_error
+from ._generated.models import StorageErrorException
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ ShareFileClient = TypeVar("ShareFileClient")
+
+
+class ShareLeaseClient(object):
+ """Creates a new ShareLeaseClient.
+
+ This client provides lease operations on a ShareFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file to lease.
+ :type client: ~azure.storage.fileshare.ShareFileClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (ShareFileClient, Optional[str]) -> None
+ self.id = lease_id or str(uuid.uuid4())
+ self.last_modified = None
+ self.etag = None
+ if hasattr(client, 'file_name'):
+ self._client = client._client.file # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use ShareFileClient.")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.release()
+
+ @distributed_trace
+ def acquire(self, **kwargs):
+ # type: (**Any) -> None
+ """Requests a new lease. This operation establishes and manages a lock on a
+ file for write and delete operations. If the file does not have an active lease,
+ the File service creates a lease on the file and returns a new lease ID. If the
+ file has an active lease, you can only request a new lease using the active lease ID.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ try:
+ response = self._client.acquire_lease(
+ timeout=kwargs.pop('timeout', None),
+ duration=-1,
+ proposed_lease_id=self.id,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+ self.etag = response.get('etag') # type: str
+
+ @distributed_trace
+ def release(self, **kwargs):
+ # type: (**Any) -> None
+ """Releases the lease. The lease may be released if the lease ID specified on the request matches
+ that associated with the file. Releasing the lease allows another client to immediately acquire the lease
+ for the file as soon as the release is complete.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ try:
+ response = self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def change(self, proposed_lease_id, **kwargs):
+ # type: (str, **Any) -> None
+ """Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+ a new lease ID in x-ms-proposed-lease-id.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The File service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ try:
+ response = self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def break_lease(self, **kwargs):
+ # type: (**Any) -> int
+ """Force breaks the lease if the file has an active lease. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+ Once a lease is broken, it cannot be changed. When a lease is successfully broken,
+ the response indicates the interval in seconds until a new lease can be acquired.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
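The lease methods above compose into a simple lifecycle. A hedged sketch, assuming a ShareFileClient pointing at an existing file (all account details are placeholders):

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import (
    ShareFileClient, ShareLeaseClient)

file_client = ShareFileClient(
    account_url="https://myaccount.file.core.windows.net",  # placeholder
    share_name="myshare",
    file_path="dir/file.txt",
    credential="<account-key>")  # placeholder

lease = ShareLeaseClient(file_client)
lease.acquire()  # infinite-duration lock (duration=-1 above)
print(lease.id, lease.etag, lease.last_modified)

lease.change("11111111-1111-1111-1111-111111111111")  # rotate the lease ID
remaining = lease.break_lease()  # any authorized request may break
print("seconds until a new lease can be acquired:", remaining)
```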
+ :rtype: int
+ """
+ try:
+ response = self._client.break_lease(
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return response.get('lease_time') # type: ignore
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py
new file mode 100644
index 0000000..2d348d6
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py
@@ -0,0 +1,925 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from azure.core.paging import PageIterator
+from ._parser import _parse_datetime_from_str
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from ._shared.models import DictMixin, get_enum_value
+from ._generated.models import StorageErrorException
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import DirectoryItem
+
+
+def _wrap_item(item):
+ if isinstance(item, DirectoryItem):
+ return {'name': item.name, 'is_directory': True}
+ return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class Metrics(GeneratedMetrics):
+ """A summary of request statistics grouped by API in hour or minute aggregates
+ for files.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :keyword str version: The version of Storage Analytics to configure.
+ :keyword bool enabled: Required. Indicates whether metrics are enabled for the
+ File service.
+ :keyword bool include_apis: Indicates whether metrics should generate summary
+ statistics for called API operations.
+ :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should
+ persist.
+ """
+
+ def __init__(self, **kwargs):
+ self.version = kwargs.get('version', u'1.0')
+ self.enabled = kwargs.get('enabled', False)
+ self.include_apis = kwargs.get('include_apis')
+ self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ version=generated.version,
+ enabled=generated.enabled,
+ include_apis=generated.include_apis,
+ retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
+ )
+
+
+class RetentionPolicy(GeneratedRetentionPolicy):
+ """The retention policy which determines how long the associated data should
+ persist.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param bool enabled: Required. Indicates whether a retention policy is enabled
+ for the storage service.
+ :param int days: Indicates the number of days that metrics or logging or
+ soft-deleted data should be retained. All data older than this value will
+ be deleted.
+ """ + + def __init__(self, enabled=False, days=None): + self.enabled = enabled + self.days = days + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. + """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get acl methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. 
+ + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.fileshare.FileSasPermissions or + ~azure.storage.fileshare.ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class LeaseProperties(DictMixin): + """File Lease Properties. + + :ivar str status: + The lease status of the file. Possible values: locked|unlocked + :ivar str state: + Lease state of the file. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """Used to store the content settings of a file. + + :param str content_type: + The content type specified for the file. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :param str content_language: + If the content_language has previously been set + for the file, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the file, that value is stored. 
+ :param str content_md5:
+ If the content_md5 has been set for the file, this response
+ header is stored so that the client can check for message content
+ integrity.
+ """
+
+ def __init__(
+ self, content_type=None, content_encoding=None,
+ content_language=None, content_disposition=None,
+ cache_control=None, content_md5=None, **kwargs):
+
+ self.content_type = content_type or kwargs.get('Content-Type')
+ self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
+ self.content_language = content_language or kwargs.get('Content-Language')
+ self.content_md5 = content_md5 or kwargs.get('Content-MD5')
+ self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
+ self.cache_control = cache_control or kwargs.get('Cache-Control')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ settings = cls()
+ settings.content_type = generated.properties.content_type or None
+ settings.content_encoding = generated.properties.content_encoding or None
+ settings.content_language = generated.properties.content_language or None
+ settings.content_md5 = generated.properties.content_md5 or None
+ settings.content_disposition = generated.properties.content_disposition or None
+ settings.cache_control = generated.properties.cache_control or None
+ return settings
+
+
+class ShareProperties(DictMixin):
+ """Share's properties class.
+
+ :ivar str name:
+ The name of the share.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the share was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar int quota:
+ The allocated quota.
+ :ivar dict metadata: A dict with name_value pairs to associate with the
+ share as metadata.
+ :ivar str snapshot:
+ Snapshot of the share.
+ :ivar bool deleted:
+ Whether this share is deleted.
+ This is a service returned value, and it is only set when deleted shares are included in the listing.
+ :ivar datetime deleted_time:
+ The time at which the deleted share was deleted.
+ This is a service returned value, and it is only set when deleted shares are included in the listing.
+ :ivar str version:
+ The version of the deleted share.
+ This is a service returned value, and it is only set when deleted shares are included in the listing.
+ :ivar int remaining_retention_days:
+ The number of remaining days for which the deleted share will be kept.
+ This is a service returned value, and it is only set when deleted shares are included in the listing.
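ContentSettings is most often attached at upload time. A short sketch, assuming the content_settings keyword on the ShareFileClient wrapper's upload_file (placeholders throughout):

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import (
    ContentSettings, ShareFileClient)

file_client = ShareFileClient(
    account_url="https://myaccount.file.core.windows.net",  # placeholder
    share_name="myshare",
    file_path="reports/summary.csv",
    credential="<account-key>")  # placeholder

file_client.upload_file(
    b"id,total\n1,42\n",
    content_settings=ContentSettings(
        content_type="text/csv",        # surfaces as the Content-Type header
        cache_control="max-age=3600"))  # surfaces as the Cache-Control header
```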
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.quota = kwargs.get('x-ms-share-quota') + self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') + self.metadata = kwargs.get('metadata') + self.snapshot = None + self.deleted = None + self.deleted_time = None + self.version = None + self.remaining_retention_days = None + self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') + self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') + self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.quota = generated.properties.quota + props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time + props.metadata = generated.metadata + props.snapshot = generated.snapshot + props.deleted = generated.deleted + props.deleted_time = generated.properties.deleted_time + props.version = generated.version + props.remaining_retention_days = generated.properties.remaining_retention_days + props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps + props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps + props.provisioned_iops = generated.properties.provisioned_iops + return props + + +class SharePropertiesPaged(PageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + prefix=self.prefix, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class Handle(DictMixin): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :keyword str handle_id: Required. XSMB service handle ID + :keyword str path: Required. File or directory name including full path starting + from share root + :keyword str file_id: Required. FileId uniquely identifies the file or + directory. + :keyword str parent_id: ParentId uniquely identifies the parent directory of the + object. + :keyword str session_id: Required. SMB session ID in context of which the file + handle was opened + :keyword str client_ip: Required. Client IP that opened the handle + :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) + """ + + def __init__(self, **kwargs): + self.id = kwargs.get('handle_id') + self.path = kwargs.get('path') + self.file_id = kwargs.get('file_id') + self.parent_id = kwargs.get('parent_id') + self.session_id = kwargs.get('session_id') + self.client_ip = kwargs.get('client_ip') + self.open_time = kwargs.get('open_time') + self.last_reconnect_time = kwargs.get('last_reconnect_time') + + @classmethod + def _from_generated(cls, generated): + handle = cls() + handle.id = generated.handle_id + handle.path = generated.path + handle.file_id = generated.file_id + handle.parent_id = generated.parent_id + handle.session_id = generated.session_id + handle.client_ip = generated.client_ip + handle.open_time = generated.open_time + handle.last_reconnect_time = generated.last_reconnect_time + return handle + + +class HandlesPaged(PageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. 
+ :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param int results_per_page: The maximum number of handles to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+ def __init__(self, command, results_per_page=None, continuation_token=None):
+ super(HandlesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.location_mode = None
+ self.current_page = []
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access
+ return self._response.next_marker or None, self.current_page
+
+
+class DirectoryProperties(DictMixin):
+ """Directory's properties class.
+
+ :ivar str name:
+ The name of the directory.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the directory was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar bool server_encrypted:
+ Whether encryption is enabled.
+ :ivar dict metadata: A dict with name_value pairs to associate with the
+ directory as metadata.
+ :ivar change_time: Change time for the directory.
+ :vartype change_time: str or ~datetime.datetime
+ :ivar creation_time: Creation time for the directory.
+ :vartype creation_time: str or ~datetime.datetime
+ :ivar last_write_time: Last write time for the directory.
+ :vartype last_write_time: str or ~datetime.datetime
+ :ivar file_attributes:
+ The file system attributes for files and directories.
+ :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+ :ivar permission_key: Key of the permission to be set for the
+ directory/file.
+ :vartype permission_key: str
+ :ivar file_id: Required. FileId uniquely identifies the file or
+ directory.
+ :vartype file_id: str
+ :ivar parent_id: ParentId uniquely identifies the parent directory of the
+ object.
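In practice HandlesPaged surfaces through the handle-listing calls on the directory and file clients. A hedged sketch using the directory wrapper (placeholders for account details):

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import ShareDirectoryClient

directory = ShareDirectoryClient(
    account_url="https://myaccount.file.core.windows.net",  # placeholder
    share_name="myshare",
    directory_path="data",
    credential="<account-key>")  # placeholder

# Enumerate open SMB handles beneath this directory; each item is a Handle.
for handle in directory.list_handles(recursive=True):
    print(handle.id, handle.client_ip, handle.open_time)
```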
+ :vartype parent_id: str
+ """
+
+ def __init__(self, **kwargs):
+ self.name = None
+ self.last_modified = kwargs.get('Last-Modified')
+ self.etag = kwargs.get('ETag')
+ self.server_encrypted = kwargs.get('x-ms-server-encrypted')
+ self.metadata = kwargs.get('metadata')
+ self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+ self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+ self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+ self.file_attributes = kwargs.get('x-ms-file-attributes')
+ self.permission_key = kwargs.get('x-ms-file-permission-key')
+ self.file_id = kwargs.get('x-ms-file-id')
+ self.parent_id = kwargs.get('x-ms-file-parent-id')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ props = cls()
+ props.name = generated.name
+ props.last_modified = generated.properties.last_modified
+ props.etag = generated.properties.etag
+ props.server_encrypted = generated.properties.server_encrypted
+ props.metadata = generated.metadata
+ return props
+
+
+class DirectoryPropertiesPaged(PageIterator):
+ """An iterable for the contents of a directory.
+
+ This iterable will yield dicts for the contents of the directory. The dicts
+ will have the keys 'name' (str) and 'is_directory' (bool).
+ Items that are files (is_directory=False) will have an additional 'size' key.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A file name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(dict(str, Any))
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only directories whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of entries to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
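DirectoryPropertiesPaged backs ShareDirectoryClient.list_directories_and_files; the items are the dicts produced by _wrap_item above. A sketch (placeholder account details):

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import ShareDirectoryClient

directory = ShareDirectoryClient(
    account_url="https://myaccount.file.core.windows.net",  # placeholder
    share_name="myshare",
    directory_path="data",
    credential="<account-key>")  # placeholder

for item in directory.list_directories_and_files():
    if item["is_directory"]:
        print("dir ", item["name"])
    else:
        print("file", item["name"], item["size"])  # 'size' comes from _wrap_item
```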
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] + self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) + return self._response.next_marker or None, self.current_page + + +class FileProperties(DictMixin): + """File's properties class. + + :ivar str name: + The name of the file. + :ivar str path: + The path of the file. + :ivar str share: + The name of share. + :ivar str snapshot: + File snapshot. + :ivar int content_length: + Size of file in bytes. + :ivar dict metadata: A dict with name_value pairs to associate with the + file as metadata. + :ivar str file_type: + Type of the file. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int size: + Size of file in bytes. + :ivar str content_range: + The range of bytes. + :ivar bool server_encrypted: + Whether encryption is enabled. + :ivar copy: + The copy properties. + :vartype copy: ~azure.storage.fileshare.CopyProperties + :ivar content_settings: + The content settings for the file. 
+ :vartype content_settings: ~azure.storage.fileshare.ContentSettings + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.path = None + self.share = None + self.snapshot = None + self.content_length = kwargs.get('Content-Length') + self.metadata = kwargs.get('metadata') + self.file_type = kwargs.get('x-ms-type') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) + self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) + self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) + self.file_attributes = kwargs.get('x-ms-file-attributes') + self.permission_key = kwargs.get('x-ms-file-permission-key') + self.file_id = kwargs.get('x-ms-file-id') + self.parent_id = kwargs.get('x-ms-file-parent-id') + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.content_length = generated.properties.content_length + props.metadata = generated.properties.metadata + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + return props + + +class CopyProperties(DictMixin): + """File Copy Properties. + + :ivar str id: + String identifier for the last attempted Copy File operation where this file + was the destination file. This header does not appear if this file has never + been the destination in a Copy File operation, or if this file has been + modified after a concluded Copy File operation. + :ivar str source: + URL up to 2 KB in length that specifies the source file used in the last attempted + Copy File operation where this file was the destination file. This header does not + appear if this file has never been the destination in a Copy File operation, or if + this file has been modified after a concluded Copy File operation. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy File. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy File operation where this file was the destination file. Can show + between 0 and Content-Length bytes copied. + :ivar datetime completion_time: + Conclusion time of the last attempted Copy File operation where this file was the + destination file. This value can specify the time of a completed, aborted, or + failed copy attempt. + :ivar str status_description: + Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal + or non-fatal copy operation failure. + :ivar bool incremental_copy: + Copies the snapshot of the source file to a destination file. 
+ The snapshot is copied such that only the differential changes between
+ the previously copied snapshot and the source are transferred to the destination.
+ :ivar datetime destination_snapshot:
+ Included if the file is incremental copy or incremental copy snapshot,
+ if x-ms-copy-status is success. Snapshot time of the last successful
+ incremental copy snapshot for this file.
+ """
+
+ def __init__(self, **kwargs):
+ self.id = kwargs.get('x-ms-copy-id')
+ self.source = kwargs.get('x-ms-copy-source')
+ self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+ self.progress = kwargs.get('x-ms-copy-progress')
+ self.completion_time = kwargs.get('x-ms-copy-completion-time')
+ self.status_description = kwargs.get('x-ms-copy-status-description')
+ self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+ self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ copy = cls()
+ copy.id = generated.properties.copy_id or None
+ copy.status = get_enum_value(generated.properties.copy_status) or None
+ copy.source = generated.properties.copy_source or None
+ copy.progress = generated.properties.copy_progress or None
+ copy.completion_time = generated.properties.copy_completion_time or None
+ copy.status_description = generated.properties.copy_status_description or None
+ copy.incremental_copy = generated.properties.incremental_copy or None
+ copy.destination_snapshot = generated.properties.destination_snapshot or None
+ return copy
+
+
+class FileSasPermissions(object):
+ """FileSasPermissions class to be used with
+ generating shared access signature operations.
+
+ :param bool read:
+ Read the content, properties, metadata. Use the file as the source of a copy
+ operation.
+ :param bool create:
+ Create a new file or copy a file to a new file.
+ :param bool write:
+ Create or write content, properties, metadata. Resize the file. Use the file
+ as the destination of a copy operation within the same account.
+ :param bool delete:
+ Delete the file.
+ """
+ def __init__(self, read=False, create=False, write=False, delete=False):
+ self.read = read
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self._str = (('r' if self.read else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a FileSasPermissions from a string.
+
+ To specify read, create, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ create permissions, you would provide a string "rc".
+
+ :param str permission: The string which dictates the read, create,
+ write, or delete permissions
+ :return: A FileSasPermissions object
+ :rtype: ~azure.storage.fileshare.FileSasPermissions
+ """
+ p_read = 'r' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+
+ parsed = cls(p_read, p_create, p_write, p_delete)
+ parsed._str = permission # pylint: disable = protected-access
+ return parsed
+
+
+class ShareSasPermissions(object):
+ """ShareSasPermissions class to be used with
+ generating shared access signature and access policy operations.
+
+ :param bool read:
+ Read the content, properties or metadata of any file in the share. Use any
+ file in the share as the source of a copy operation.
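FileSasPermissions round-trips its single-letter string form, which is what ends up in the SAS token. A tiny self-contained check using only the class defined above:

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import FileSasPermissions

perms = FileSasPermissions.from_string("rcw")
assert perms.read and perms.create and perms.write and not perms.delete
assert str(perms) == "rcw"
```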
+ :param bool write:
+ For any file in the share, create or write content, properties or metadata.
+ Resize the file. Use the file as the destination of a copy operation within
+ the same account.
+ Note: You cannot grant permissions to read or write share properties or
+ metadata with a service SAS. Use an account SAS instead.
+ :param bool delete:
+ Delete any file in the share.
+ Note: You cannot grant permissions to delete a share with a service SAS. Use
+ an account SAS instead.
+ :param bool list:
+ List files and directories in the share.
+ """
+ def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin
+ self.read = read
+ self.write = write
+ self.delete = delete
+ self.list = list
+ self._str = (('r' if self.read else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('l' if self.list else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a ShareSasPermissions from a string.
+
+ To specify read, write, delete, or list permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, write,
+ delete, or list permissions
+ :return: A ShareSasPermissions object
+ :rtype: ~azure.storage.fileshare.ShareSasPermissions
+ """
+ p_read = 'r' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_list = 'l' in permission
+
+ parsed = cls(p_read, p_write, p_delete, p_list)
+ parsed._str = permission # pylint: disable = protected-access
+ return parsed
+
+
+class NTFSAttributes(object):
+ """Valid set of attributes to set for a file or directory.
+
+ When setting attributes for a directory, 'Directory' should always be enabled,
+ except when 'None' is used to clear all attributes.
+
+ :ivar bool read_only:
+ Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
+ :ivar bool hidden:
+ Enable/disable 'Hidden' attribute for DIRECTORY or FILE
+ :ivar bool system:
+ Enable/disable 'System' attribute for DIRECTORY or FILE
+ :ivar bool none:
+ Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
+ :ivar bool directory:
+ Enable/disable 'Directory' attribute for DIRECTORY
+ :ivar bool archive:
+ Enable/disable 'Archive' attribute for DIRECTORY or FILE
+ :ivar bool temporary:
+ Enable/disable 'Temporary' attribute for FILE
+ :ivar bool offline:
+ Enable/disable 'Offline' attribute for DIRECTORY or FILE
+ :ivar bool not_content_indexed:
+ Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
+ :ivar bool no_scrub_data:
+ Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
+ """
+ def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
+ temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
+
+ self.read_only = read_only
+ self.hidden = hidden
+ self.system = system
+ self.none = none
+ self.directory = directory
+ self.archive = archive
+ self.temporary = temporary
+ self.offline = offline
+ self.not_content_indexed = not_content_indexed
+ self.no_scrub_data = no_scrub_data
+ self._str = (('ReadOnly|' if self.read_only else '') +
+ ('Hidden|' if self.hidden else '') +
+ ('System|' if self.system else '') +
+ ('None|' if self.none else '') +
+ ('Directory|' if self.directory else '') +
+ ('Archive|' if self.archive else '') +
+ ('Temporary|' if self.temporary else '') +
+ ('Offline|' if self.offline else '') +
+ ('NotContentIndexed|' if self.not_content_indexed else '') +
+ ('NoScrubData|' if self.no_scrub_data else ''))
+
+ def __str__(self):
+ concatenated_params = self._str
+ return concatenated_params.strip('|')
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a NTFSAttributes from a string.
+
+ To specify permissions you can pass in a string with the
+ desired permissions, e.g. "ReadOnly|Hidden|System"
+
+ :param str string: The string which dictates the permissions.
+ :return: A NTFSAttributes object
+ :rtype: ~azure.storage.fileshare.NTFSAttributes
+ """
+ read_only = "ReadOnly" in string
+ hidden = "Hidden" in string
+ system = "System" in string
+ none = "None" in string
+ directory = "Directory" in string
+ archive = "Archive" in string
+ temporary = "Temporary" in string
+ offline = "Offline" in string
+ not_content_indexed = "NotContentIndexed" in string
+ no_scrub_data = "NoScrubData" in string
+
+ parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
+ no_scrub_data)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+def service_properties_deserialize(generated):
+ """Deserialize a ServiceProperties object into a dict.
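NTFSAttributes performs the same kind of round trip with its pipe-delimited form; note that __str__ strips the trailing separator the constructor accumulates. A self-contained check:

```python
from azure.multiapi.storagev2.fileshare.v2019_12_12 import NTFSAttributes

attrs = NTFSAttributes.from_string("ReadOnly|Directory")
assert attrs.read_only and attrs.directory
assert str(attrs) == "ReadOnly|Directory"

# Built via the constructor, the trailing '|' is stripped on str().
assert str(NTFSAttributes(read_only=True, directory=True)) == "ReadOnly|Directory"
```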
+ """ + return { + 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access + 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access + 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access + } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py new file mode 100644 index 0000000..db7cab5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py @@ -0,0 +1,42 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import datetime, timedelta + +_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' +_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else' \ + 'please use file_permission_key' + + +def _get_file_permission(file_permission, file_permission_key, default_permission): + # if file_permission and file_permission_key are both empty, then use the default_permission + # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used + if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: + raise ValueError(_FILE_PERMISSION_TOO_LONG) + + if not file_permission: + if not file_permission_key: + return default_permission + return None + + if not file_permission_key: + return file_permission + + raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) + + +def _parse_datetime_from_str(string_datetime): + if not string_datetime: + return None + dt, _, us = string_datetime.partition(".") + dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") + us = int(us[:-2]) # microseconds + datetime_obj = dt + timedelta(microseconds=us) + return datetime_obj + + +def _datetime_to_str(datetime_obj): + return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py new file mode 100644 index 0000000..c075511 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py @@ -0,0 +1,111 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from azure.core import MatchConditions + +from ._parser import _datetime_to_str, _get_file_permission +from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo + + +_SUPPORTED_API_VERSIONS = [ + '2019-02-02', + '2019-07-07', + '2019-12-12' +] + + +def _get_match_headers(kwargs, match_param, etag_param): + # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) + # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. 
+ if_match = None + if_none_match = None + match_condition = kwargs.pop(match_param, None) + if match_condition == MatchConditions.IfNotModified: + if_match = kwargs.pop(etag_param, None) + if not if_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfPresent: + if_match = '*' + elif match_condition == MatchConditions.IfModified: + if_none_match = kwargs.pop(etag_param, None) + if not if_none_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfMissing: + if_none_match = '*' + elif match_condition is None: + if etag_param in kwargs: + raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) + else: + raise TypeError("Invalid match condition: {}".format(match_condition)) + return if_match, if_none_match + + +def get_source_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) + ) + +def get_access_conditions(lease): + # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def get_smb_properties(kwargs): + # type: (Dict[str, Any]) -> Dict[str, Any] + ignore_read_only = kwargs.pop('ignore_read_only', None) + set_archive_attribute = kwargs.pop('set_archive_attribute', None) + file_permission = kwargs.pop('file_permission', None) + file_permission_key = kwargs.pop('permission_key', None) + file_attributes = kwargs.pop('file_attributes', None) + file_creation_time = kwargs.pop('file_creation_time', None) or "" + file_last_write_time = kwargs.pop('file_last_write_time', None) or "" + + file_permission_copy_mode = None + file_permission = _get_file_permission(file_permission, file_permission_key, None) + + if file_permission: + if file_permission.lower() == "source": + file_permission = None + file_permission_copy_mode = "source" + else: + file_permission_copy_mode = "override" + elif file_permission_key: + if file_permission_key.lower() == "source": + file_permission_key = None + file_permission_copy_mode = "source" + else: + file_permission_copy_mode = "override" + return { + 'file_permission': file_permission, + 'file_permission_key': file_permission_key, + 'copy_file_smb_info': CopyFileSmbInfo( + file_permission_copy_mode=file_permission_copy_mode, + ignore_read_only=ignore_read_only, + file_attributes=file_attributes, + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + set_archive_attribute=set_archive_attribute + ) + + } + +def get_api_version(kwargs, default): + # type: (Dict[str, Any]) -> str + api_version = kwargs.pop('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) + return api_version or default diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py new file mode 100644 index 0000000..765ec5e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py @@ -0,0 +1,705 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import ( + StorageErrorException, + SignedIdentifier, + DeleteSnapshotsOptionType, + SharePermission) +from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission +from ._serialize import get_api_version +from ._directory_client import ShareDirectoryClient +from ._file_client import ShareFileClient + +if TYPE_CHECKING: + from ._models import ShareProperties, AccessPolicy + + +class ShareClient(StorageAccountHostsMixin): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + @classmethod + def from_share_url(cls, share_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareClient + """ + :param str share_url: The full URI to the share. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + """ + try: + if not share_url.lower().startswith('http'): + share_url = "https://" + share_url + except AttributeError: + raise ValueError("Share URL must be a string.") + parsed_url = urlparse(share_url.rstrip('/')) + if not (parsed_url.path and parsed_url.netloc): + raise ValueError("Invalid URL: {}".format(share_url)) + + share_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(share_path) > 1: + account_path = "/" + "/".join(share_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + + share_name = unquote(share_path[-1]) + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + if not share_name: + raise ValueError("Invalid URL. Please provide a URL with a valid share name") + return cls(account_url, share_name, path_snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. 
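+
+        :param str hostname: The hostname to use when formatting the endpoint URL.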
+ """ + share_name = self.share_name + if isinstance(share_name, six.text_type): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + share_name, # type: str + snapshot=None, # type: Optional[str] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareClient + """Create ShareClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str snapshot: + The optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_client_from_conn_string] + :end-before: [END create_share_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Gets the share client from connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode) + + @distributed_trace + def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. 
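+
+        A minimal usage sketch (values are illustrative; assumes this ``ShareClient``
+        was constructed with valid credentials)::
+
+            share_client.create_share(metadata={'category': 'test'}, quota=1)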
+ + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 8 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. + + A snapshot of a share has the same name as the base share from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_snapshot] + :end-before: [END create_share_snapshot] + :language: python + :dedent: 12 + :caption: Creates a snapshot of the file share. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.share.create_snapshot( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def delete_share( + self, delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START delete_share] + :end-before: [END delete_share] + :language: python + :dedent: 12 + :caption: Deletes the share and any snapshots. 
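+
+        If share soft delete is enabled for the account, a deleted share can be
+        restored within the retention period using
+        :func:`ShareServiceClient.undelete_share`.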
+        """
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.include
+        try:
+            self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                delete_snapshots=delete_include,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_share_properties(self, **kwargs):
+        # type: (Any) -> ShareProperties
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 12
+                :caption: Gets the share properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            props = self._client.share.get_properties(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                cls=deserialize_share_properties,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        props.name = self.share_name
+        props.snapshot = self.snapshot
+        return props  # type: ignore
+
+    @distributed_trace
+    def set_share_quota(self, quota, **kwargs):
+        # type: (int, Any) -> Dict[str, Any]
+        """Sets the quota for the share.
+
+        :param int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START set_share_quota]
+                :end-before: [END set_share_quota]
+                :language: python
+                :dedent: 12
+                :caption: Sets the share quota.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return self._client.share.set_quota(  # type: ignore
+                timeout=timeout,
+                quota=quota,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_share_metadata(self, metadata, **kwargs):
+        # type: (Dict[str, Any], Any) -> Dict[str, Any]
+        """Sets the metadata for the share.
+
+        Each call to this operation replaces all existing metadata
+        attached to the share. To remove all metadata from the share,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the share as metadata.
+        :type metadata: dict(str, str)
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START set_share_metadata]
+                :end-before: [END set_share_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Sets the share metadata.
+ """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. The permissions + indicate whether files in a share may be accessed publicly. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + try: + return self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. + + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The approximate size of the data (in bytes) stored on the share. 
+        :rtype: int
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = self._client.share.get_statistics(
+                timeout=timeout,
+                **kwargs)
+            return stats.share_usage_bytes  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_directories_and_files(
+            self, directory_name=None,  # type: Optional[str]
+            name_starts_with=None,  # type: Optional[str]
+            marker=None,  # type: Optional[str]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Iterable[Dict[str,str]]
+        """Lists the directories and files under the share.
+
+        :param str directory_name:
+            Name of a directory.
+        :param str name_starts_with:
+            Filters the results to return only directories whose names
+            begin with the specified prefix.
+        :param str marker:
+            An opaque continuation token. This value can be retrieved from the
+            next_marker field of a previous generator object. If specified,
+            this generator will begin returning results from this point.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START share_list_files_in_dir]
+                :end-before: [END share_list_files_in_dir]
+                :language: python
+                :dedent: 12
+                :caption: List directories and files in the share.
+        """
+        timeout = kwargs.pop('timeout', None)
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        return directory.list_directories_and_files(
+            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+    @staticmethod
+    def _create_permission_for_share_options(file_permission,  # type: str
+                                             **kwargs):
+        options = {
+            'share_permission': SharePermission(permission=file_permission),
+            'cls': deserialize_permission_key,
+            'timeout': kwargs.pop('timeout', None),
+        }
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def create_permission_for_share(self, file_permission,  # type: str
+                                    **kwargs  # type: Any
+                                    ):
+        # type: (...) -> str
+        """Create a permission (a security descriptor) at the share level.
+
+        This 'permission' can be used for the files/directories in the share.
+        If a matching 'permission' already exists, its key is returned; otherwise,
+        a new permission is created at the share level and its key is returned.
+
+        :param str file_permission:
+            File permission, a portable SDDL string.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file permission key
+        :rtype: str
+        """
+        timeout = kwargs.pop('timeout', None)
+        options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+        try:
+            return self._client.share.create_permission(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_permission_for_share(  # type: ignore
+            self, permission_key,  # type: str
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> str
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            Key of the file permission to retrieve
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace + def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + directory = self.get_directory_client(directory_name) + directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py new file mode 100644 index 0000000..549e09f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py @@ -0,0 +1,409 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Dict, List, + TYPE_CHECKING +) +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.response_handlers import process_storage_error +from ._generated import AzureFileStorage +from ._generated.models import StorageErrorException, StorageServiceProperties +from ._generated.version import VERSION +from ._share_client import ShareClient +from ._serialize import get_api_version +from ._models import ( + SharePropertiesPaged, + service_properties_deserialize, +) + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ( + ShareProperties, + Metrics, + CorsRule, + ) + + +class ShareServiceClient(StorageAccountHostsMixin): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. 
+ For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. + """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File Share service.") + + _, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> ShareServiceClient + """Create ShareServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A File Share service client. + :rtype: ~azure.storage.fileshare.ShareServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client_from_conn_string] + :end-before: [END create_share_service_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Create the share service client with connection string. 
+ """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 8 + :caption: Get file share service properties. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. + """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors + ) + try: + self._client.service.set_properties(props, timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> ItemPaged[ShareProperties] + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. 
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This is only supported for accounts with share soft delete enabled.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 12
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace
+    def create_share(
+            self, share_name,  # type: str
+            **kwargs
+        ):
+        # type: (...) -> ShareClient
+        """Creates a new share under the specified account. If a share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict(str,str) metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            Quota in gigabytes (GB).
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 8
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
+        return share
+
+    @distributed_trace
+    def delete_share(
+            self, share_name,  # type: Union[ShareProperties, str]
+            delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 12
+                :caption: Delete a share in the file share service.
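+
+        If share soft delete is enabled for the account, the deleted share can
+        later be recovered with :func:`undelete_share` while it is still within
+        the retention period.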
+ """ + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + share.delete_share( + delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) + + @distributed_trace + def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): + # type: (str, str, **Any) -> ShareClient + """Restores soft-deleted share. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_share_name: + Specifies the name of the deleted share to restore. + :param str deleted_share_version: + Specifies the version of the deleted share to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.ShareClient + """ + share = self.get_share_client(deleted_share_name) + + try: + share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access + deleted_share_version=deleted_share_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return share + except StorageErrorException as error: + process_storage_error(error) + + def get_share_client(self, share, snapshot=None): + # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient + """Get a client to interact with the specified share. + The share need not already exist. + + :param share: + The share. This can either be the name of the share, + or an instance of ShareProperties. + :type share: str or ~azure.storage.fileshare.ShareProperties + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :returns: A ShareClient. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_share_client] + :end-before: [END get_share_client] + :language: python + :dedent: 8 + :caption: Gets the share client. + """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, + _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py new file mode 100644 index 0000000..b11dc57 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py @@ -0,0 +1,140 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
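+
+    Within this module it is raised by ``SharedKeyCredentialPolicy`` (below), which
+    wraps whatever exception occurred while computing the shared key signature.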
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py new file mode 100644 index 0000000..14deea6 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py @@ -0,0 +1,437 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, + Optional, + Any, + Iterable, + Dict, + List, + Type, + Tuple, + TYPE_CHECKING, +) +import logging + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, + "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, + "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError("Invalid service: {}".format(service)) + service_name = service.split('-')[0] + account = parsed_url.netloc.split(".{}.core.".format(service_name)) + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = "{}-secondary.{}.{}".format( + self.credential.account_name, service_name, SERVICE_HOST_BASE) + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self.require_encryption = kwargs.get("require_encryption", False) + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(share_snapshot)
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self, *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
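+
+        :param HttpRequest reqs: The requests to send as a single batch.
+        :keyword bool raise_on_any_failure:
+            Whether to raise a ``PartialBatchErrorException`` if any of the
+            sub-requests fail. Defaults to True.
+        :returns: An iterator over the parsed sub-responses.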
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url='https://{}/?comp=batch'.format(self.primary_hostname),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=[
+                StorageHeadersPolicy(),
+                self._credential_policy
+            ],
+            enforce_https=False
+        )
+
+        pipeline_response = self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()
+            if raise_on_any_failure:
+                parts = list(response.parts())
+                if any(p for p in parts if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts
+                    )
+                    raise error
+                return iter(parts)
+            return parts
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+
+class TransportWrapper(HttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):  # pylint: disable=arguments-differ
+        pass
+
+
+def _format_shared_key_credential(account_name, credential):
+    if isinstance(credential, six.string_types):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name'.")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key'.")
+        return SharedKeyCredentialPolicy(**credential)
+    return credential
+
+
+def parse_connection_str(conn_str, credential, service):
+    conn_str = conn_str.rstrip(";")
+    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict(conn_settings)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
+        except KeyError:
+            credential = conn_settings.get("SharedAccessSignature")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = "{}://{}.{}.{}".format(
+                conn_settings["DefaultEndpointsProtocol"],
+                conn_settings["AccountName"],
+                service,
+                conn_settings["EndpointSuffix"],
+            )
+            secondary = "{}-secondary.{}.{}".format(
+                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = "https://{}.{}.{}".format(
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py new file mode 100644 index 0000000..d252ad0 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py @@ -0,0 +1,179 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+    ContentDecodePolicy,
+    AsyncBearerTokenCredentialPolicy,
+    AsyncRedirectPolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+    StorageContentValidation,
+    StorageRequestHook,
+    StorageHosts,
+    StorageHeadersPolicy,
+    QueueMessagePolicy
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .._generated.models import StorageErrorException
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import Pipeline
+    from azure.core.pipeline.transport import HttpRequest
+    from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """Close the sockets opened by the client.
+        It need not be called when the client is used with a context manager.
+        """
+        await self._client.close()
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, 'get_token'):
+            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        config.transport = kwargs.get('transport')  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport
+            except ImportError:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
+            config.transport = AioHttpTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),  # type: ignore
+            config.retry_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, AsyncPipeline(config.transport, policies=policies)
+
+    async def _batch_send(
+        self, *reqs: 'HttpRequest',
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url='https://{}/?comp=batch'.format(self.primary_hostname),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=[
+                StorageHeadersPolicy(),
+                self._credential_policy
+            ],
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Return an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
+        pass
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py
new file mode 100644
index 0000000..7fb05b5
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py
@@ -0,0 +1,26 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+from .._generated.version import VERSION
+
+
+X_MS_VERSION = VERSION
+
+# Socket timeout in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 20
+
+# For python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
+# The socket timeout is now the maximum total duration to send all data.
+if sys.version_info >= (3, 5):
+    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
+    # the 2000 seconds was calculated with: 100MB (max block size) / 50KB/s (an arbitrarily chosen minimum upload speed)
+    READ_TIMEOUT = 2000
+
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py
new file mode 100644
index 0000000..62607cc
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py
@@ -0,0 +1,542 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import os
+from os import urandom
+from json import (
+    dumps,
+    loads,
+)
+from collections import OrderedDict
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+from cryptography.hazmat.primitives.ciphers.algorithms import AES
+from cryptography.hazmat.primitives.ciphers.modes import CBC
+from cryptography.hazmat.primitives.padding import PKCS7
+
+from azure.core.exceptions import HttpResponseError
+
+from .._version import VERSION
+from . import encode_base64, decode_base64_to_bytes
+
+
+_ENCRYPTION_PROTOCOL_V1 = '1.0'
+_ERROR_OBJECT_INVALID = \
+    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
+
+
+def _validate_not_none(param_name, param):
+    if param is None:
+        raise ValueError('{0} should not be None.'.format(param_name))
+
+
+def _validate_key_encryption_key_wrap(kek):
+    # Note that None is not callable and so will fail the second clause of each check.
+    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
+    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
+
+
+class _EncryptionAlgorithm(object):
+    '''
+    Specifies which client encryption algorithm is used.
+    '''
+    AES_CBC_256 = 'AES_CBC_256'
+
+
+class _WrappedContentKey:
+    '''
+    Represents the envelope key details stored on the service.
+    '''
+
+    def __init__(self, algorithm, encrypted_key, key_id):
+        '''
+        :param str algorithm:
+            The algorithm used for wrapping.
+        :param bytes encrypted_key:
+            The encrypted content-encryption-key.
+        :param str key_id:
+            The key-encryption-key identifier string.
+        '''
+
+        _validate_not_none('algorithm', algorithm)
+        _validate_not_none('encrypted_key', encrypted_key)
+        _validate_not_none('key_id', key_id)
+
+        self.algorithm = algorithm
+        self.encrypted_key = encrypted_key
+        self.key_id = key_id
+
+
+class _EncryptionAgent:
+    '''
+    Represents the encryption agent stored on the service.
+    It consists of the encryption protocol version and encryption algorithm used.
+    '''
+
+    def __init__(self, encryption_algorithm, protocol):
+        '''
+        :param _EncryptionAlgorithm encryption_algorithm:
+            The algorithm used for encrypting the message contents.
+        :param str protocol:
+            The protocol version used for encryption.
+        '''
+
+        _validate_not_none('encryption_algorithm', encryption_algorithm)
+        _validate_not_none('protocol', protocol)
+
+        self.encryption_algorithm = str(encryption_algorithm)
+        self.protocol = protocol
+
+
+class _EncryptionData:
+    '''
+    Represents the encryption data that is stored on the service.
+    '''
+
+    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
+                 key_wrapping_metadata):
+        '''
+        :param bytes content_encryption_IV:
+            The content encryption initialization vector.
+        :param _EncryptionAgent encryption_agent:
+            The encryption agent.
+        :param _WrappedContentKey wrapped_content_key:
+            An object that stores the wrapping algorithm, the key identifier,
+            and the encrypted key bytes.
+        :param dict key_wrapping_metadata:
+            A dict containing metadata related to the key wrapping.
+        '''
+
+        _validate_not_none('content_encryption_IV', content_encryption_IV)
+        _validate_not_none('encryption_agent', encryption_agent)
+        _validate_not_none('wrapped_content_key', wrapped_content_key)
+
+        self.content_encryption_IV = content_encryption_IV
+        self.encryption_agent = encryption_agent
+        self.wrapped_content_key = wrapped_content_key
+        self.key_wrapping_metadata = key_wrapping_metadata
+
+
+def _generate_encryption_data_dict(kek, cek, iv):
+    '''
+    Generates and returns the encryption metadata as a dict.
+
+    :param object kek: The key encryption key. See calling functions for more information.
+    :param bytes cek: The content encryption key.
+    :param bytes iv: The initialization vector.
+    :return: A dict containing all the encryption metadata.
+    :rtype: dict
+    '''
+    # Encrypt the cek.
+    wrapped_cek = kek.wrap_key(cek)
+
+    # Build the encryption_data dict.
+    # Use OrderedDict to comply with Java's ordering requirement.
+    wrapped_content_key = OrderedDict()
+    wrapped_content_key['KeyId'] = kek.get_kid()
+    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
+    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
+
+    encryption_agent = OrderedDict()
+    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
+    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
+
+    encryption_data_dict = OrderedDict()
+    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
+    encryption_data_dict['EncryptionAgent'] = encryption_agent
+    encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
+    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
+
+    return encryption_data_dict
+
+
+def _dict_to_encryption_data(encryption_data_dict):
+    '''
+    Converts the specified dictionary to an EncryptionData object for
+    eventual use in decryption.
+
+    :param dict encryption_data_dict:
+        The dictionary containing the encryption data.
+    :return: an _EncryptionData object built from the dictionary.
+    :rtype: _EncryptionData
+    '''
+    try:
+        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
+            raise ValueError("Unsupported encryption version.")
+    except KeyError:
+        raise ValueError("Unsupported encryption version.")
+    wrapped_content_key = encryption_data_dict['WrappedContentKey']
+    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
+                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
+                                             wrapped_content_key['KeyId'])
+
+    encryption_agent = encryption_data_dict['EncryptionAgent']
+    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
+                                        encryption_agent['Protocol'])
+
+    if 'KeyWrappingMetadata' in encryption_data_dict:
+        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
+    else:
+        key_wrapping_metadata = None
+
+    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
+                                      encryption_agent,
+                                      wrapped_content_key,
+                                      key_wrapping_metadata)
+
+    return encryption_data
+
+
+def _generate_AES_CBC_cipher(cek, iv):
+    '''
+    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
+
+    :param bytes cek: The content encryption key for the cipher.
+    :param bytes iv: The initialization vector for the cipher.
+    :return: A cipher for encrypting in AES256 CBC.
+    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
+    '''
+
+    backend = default_backend()
+    algorithm = AES(cek)
+    mode = CBC(iv)
+    return Cipher(algorithm, mode, backend)
+
+
+def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
+    '''
+    Extracts and returns the content_encryption_key stored in the encryption_data object
+    and performs necessary validation on all parameters.
+    :param _EncryptionData encryption_data:
+        The encryption metadata of the retrieved value.
+    :param obj key_encryption_key:
+        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
+        instance variables for more details.
+    :param func key_resolver:
+        A function that, given a key_id, will return a key_encryption_key. Please refer
+        to high-level service object instance variables for more details.
+    :return: the content_encryption_key stored in the encryption_data object.
+    :rtype: bytes
+    '''
+
+    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+        raise ValueError('Encryption version is not supported.')
+
+    content_encryption_key = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
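+    # Illustrative sketch only (not part of the vendored code): a minimal,
+    # hypothetical key-encryption-key object that satisfies the interface
+    # checked above. A real KEK would wrap/unwrap with RSA-OAEP, AES key wrap,
+    # or an Azure Key Vault key rather than returning the key unchanged.
+    #
+    #     class InsecureDemoKek:
+    #         def get_kid(self):
+    #             return 'demo-kek'
+    #         def get_key_wrap_algorithm(self):
+    #             return 'none'
+    #         def wrap_key(self, key):
+    #             return key
+    #         def unwrap_key(self, key, algorithm):
+    #             return key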
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
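+    # Worked example of the padding arithmetic (comment only): PKCS7 always adds
+    # at least one byte, so the ciphertext produced below is the plaintext length
+    # rounded up to the next 16-byte boundary -- a 10-byte blob encrypts to
+    # 16 bytes and a 32-byte blob to 48 bytes, i.e. (len(blob) // 16 + 1) * 16.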
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param object key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    # Build the dictionary structure.
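+    # For reference, the resulting JSON envelope has this shape (values are the
+    # base64 strings produced above and by _generate_encryption_data_dict):
+    #     {
+    #         "EncryptedMessageContents": "<base64 ciphertext>",
+    #         "EncryptionData": {
+    #             "WrappedContentKey": {"KeyId": "...", "EncryptedKey": "...", "Algorithm": "..."},
+    #             "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
+    #             "ContentEncryptionIV": "<base64 IV>",
+    #             "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
+    #         }
+    #     }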
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py
new file mode 100644
index 0000000..aa31bfb
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+
+from enum import Enum
+
+
+def get_enum_value(value):
+    if value is None or value in ["None", ""]:
+        return None
+    try:
+        return value.value
+    except AttributeError:
+        return value
+
+
+class StorageErrorCode(str, Enum):
+
+    # Generic storage values
+    account_already_exists = "AccountAlreadyExists"
+    account_being_created = "AccountBeingCreated"
+    account_is_disabled = "AccountIsDisabled"
+    authentication_failed = "AuthenticationFailed"
+    authorization_failure = "AuthorizationFailure"
+    no_authentication_information = "NoAuthenticationInformation"
+    condition_headers_not_supported = "ConditionHeadersNotSupported"
+    condition_not_met = "ConditionNotMet"
+    empty_metadata_key = "EmptyMetadataKey"
+    insufficient_account_permissions = "InsufficientAccountPermissions"
+    internal_error = "InternalError"
+    invalid_authentication_info = "InvalidAuthenticationInfo"
+    invalid_header_value = "InvalidHeaderValue"
+    invalid_http_verb = "InvalidHttpVerb"
+    invalid_input = "InvalidInput"
+    invalid_md5 = "InvalidMd5"
+    invalid_metadata = "InvalidMetadata"
+    invalid_query_parameter_value = "InvalidQueryParameterValue"
+    invalid_range = "InvalidRange"
+    invalid_resource_name = "InvalidResourceName"
+    invalid_uri = "InvalidUri"
+    invalid_xml_document = "InvalidXmlDocument"
+    invalid_xml_node_value = "InvalidXmlNodeValue"
+    md5_mismatch = "Md5Mismatch"
+    metadata_too_large = "MetadataTooLarge"
+    missing_content_length_header = "MissingContentLengthHeader"
+    missing_required_query_parameter = "MissingRequiredQueryParameter"
+    missing_required_header = "MissingRequiredHeader"
+    missing_required_xml_node = "MissingRequiredXmlNode"
+    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
+    operation_timed_out = "OperationTimedOut"
+    out_of_range_input = "OutOfRangeInput"
+    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
+    request_body_too_large = "RequestBodyTooLarge"
+    resource_type_mismatch = "ResourceTypeMismatch"
+    request_url_failed_to_parse = "RequestUrlFailedToParse"
+    resource_already_exists = "ResourceAlreadyExists"
+    resource_not_found = "ResourceNotFound"
+    server_busy = "ServerBusy"
+    unsupported_header = "UnsupportedHeader"
+    unsupported_xml_node = "UnsupportedXmlNode"
+    unsupported_query_parameter = "UnsupportedQueryParameter"
+    unsupported_http_verb = "UnsupportedHttpVerb"
+
+    # Blob values
+    append_position_condition_not_met = "AppendPositionConditionNotMet"
+    blob_already_exists = "BlobAlreadyExists"
+    blob_not_found = "BlobNotFound"
+    blob_overwritten = "BlobOverwritten"
+    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
+    block_count_exceeds_limit = "BlockCountExceedsLimit"
+    block_list_too_long = "BlockListTooLong"
+    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
+    cannot_verify_copy_source = "CannotVerifyCopySource"
+    container_already_exists = "ContainerAlreadyExists"
+    container_being_deleted = "ContainerBeingDeleted"
+    container_disabled = "ContainerDisabled"
+    container_not_found = "ContainerNotFound"
+    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
+    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
+    copy_id_mismatch = "CopyIdMismatch"
+    feature_version_mismatch = "FeatureVersionMismatch"
+    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
+    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.fileshare.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.fileshare.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags) + parsed._str = permission # pylint: disable = protected-access + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. 
+        :return: A Services object
+        :rtype: ~azure.storage.fileshare.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(res_blob, res_queue, res_file)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon on the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
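+        # Summary of the decision table implemented by this function:
+        #     404 on secondary  -> retry (data may not have replicated yet)
+        #     408               -> retry (timeout)
+        #     other 3xx/4xx     -> no retry
+        #     501, 505          -> no retry (not transient)
+        #     other 5xx         -> retry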
+        if status in [501, 505]:
+            return False
+        return True
+    return False
+
+
+def urljoin(base_url, stub_url):
+    parsed = urlparse(base_url)
+    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+    return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+    def on_request(self, request):
+        message_id = request.context.options.pop('queue_message_id', None)
+        if message_id:
+            request.http_request.url = urljoin(
+                request.http_request.url,
+                message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+    request_id_header_name = 'x-ms-client-request-id'
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        super(StorageHeadersPolicy, self).on_request(request)
+        current_time = format_date_time(time())
+        request.http_request.headers['x-ms-date'] = current_time
+
+        custom_id = request.context.options.pop('client_request_id', None)
+        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+    # def on_response(self, request, response):
+    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
+    #     if self.request_id_header_name in response.http_response.headers:
+
+    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
+    #             raise AzureError(
+    #                 "Echoed client request ID: {} does not match sent client request ID: {}. "
+    #                 "Service request ID: {}".format(
+    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
+    #                     response.http_response.headers['x-ms-request-id']),
+    #                 response=response.http_response
+    #             )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
+        self.hosts = hosts
+        super(StorageHosts, self).__init__()
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        request.context.options['hosts'] = self.hosts
+        parsed_url = urlparse(request.http_request.url)
+
+        # Detect what location mode we're currently requesting with
+        location_mode = LocationMode.PRIMARY
+        for key, value in self.hosts.items():
+            if parsed_url.netloc == value:
+                location_mode = key
+
+        # See if a specific location mode has been specified, and if so, redirect
+        use_location = request.context.options.pop('use_location', None)
+        if use_location:
+            # Lock retries to the specific location
+            request.context.options['retry_to_secondary'] = False
+            if use_location not in self.hosts:
+                raise ValueError("Attempting to use undefined host location {}".format(use_location))
+            if use_location != location_mode:
+                # Update request URL to use the specified location
+                updated = parsed_url._replace(netloc=self.hosts[use_location])
+                request.http_request.url = updated.geturl()
+                location_mode = use_location
+
+        request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+    """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration and a per-request override via the
+    "logging_enable" keyword argument.
+    """
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        http_request = request.http_request
+        options = request.context.options
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                # We don't want to log the binary data of a file upload.
+                if isinstance(http_request.body, types.GeneratorType):
+                    _LOGGER.debug("File upload")
+                else:
+                    _LOGGER.debug(str(http_request.body))
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request, response):
+        # type: (PipelineRequest, PipelineResponse, Any) -> None
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
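+                # Illustrative usage sketch (not part of this module): this output
+                # only appears when the 'azure' logger is at DEBUG level and logging
+                # is enabled on the client or per call, e.g.:
+                #     import logging, sys
+                #     logger = logging.getLogger('azure')
+                #     logger.setLevel(logging.DEBUG)
+                #     logger.addHandler(logging.StreamHandler(stream=sys.stdout))
+                #     client = ShareServiceClient(account_url, credential=credential,
+                #                                 logging_enable=True)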
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, retry cannot work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if the body is not seekable, retry cannot work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
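+
+        An illustrative sketch of the pre-jitter backoff, mirroring
+        get_backoff_time below (``pre_jitter_backoff`` is a hypothetical
+        helper name)::
+
+            def pre_jitter_backoff(initial_backoff, increment_base, count):
+                # count == 0 is the first retry; later retries add increment_base**count
+                return initial_backoff + (0 if count == 0 else increment_base ** count)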
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
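+
+    Illustrative usage sketch (hypothetical account values; ShareServiceClient
+    here is the aio client of this package): a concrete policy such as the
+    ExponentialRetry defined below can be passed to a client via the
+    ``retry_policy`` keyword, which the shared base client is expected to
+    honor::
+
+        policy = ExponentialRetry(initial_backoff=2, increment_base=2)
+        client = ShareServiceClient(account_url, credential=key, retry_policy=policy)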
+ """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py
new file mode 100644
index 0000000..4f15b65
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
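+
+    Illustrative example (not part of the original docstring)::
+
+        >>> serialize_iso(datetime.datetime(2020, 8, 19, 11, 22, 1))
+        '2020-08-19T11:22:01Z'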
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py new file mode 100644 index 0000000..ac526e5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py @@ -0,0 +1,159 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.location_mode, deserialized + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message 
+= "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py new file mode 100644 index 0000000..367c655 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py @@ -0,0 +1,209 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . 
import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature as the sas_token parameter of a service
+        client, or to create a new account object.
+
+        :param str services:
+            Specifies the services accessible with the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py new file mode 100644 index 0000000..abf3fb2 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py @@ -0,0 +1,550 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
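+
+
+# Illustrative sketch only (not part of this patch): the chunked-upload helpers
+# below are typically driven like the following, where `file_op` and `size`
+# stand in for a generated operations object and the upload length
+# (hypothetical names):
+#
+#     with open('data.bin', 'rb') as stream:
+#         range_results = upload_data_chunks(
+#             service=file_op, uploader_class=FileChunkUploader,
+#             total_size=size, chunk_size=4 * 1024 * 1024,
+#             max_concurrency=4, stream=stream, validate_content=False)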
+ + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + 
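+        # (progress_total below counts bytes uploaded so far and is guarded by
+        # progress_lock when chunks are uploaded in parallel)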
self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, block_id, block_stream): + range_id = self._upload_substream_block(block_id, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
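+        # (the zero-padded offset below is base64-encoded, URL-quoted, and
+        # base64-encoded again to produce a fixed-length, URL-safe block id)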
+ index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, block_id, block_stream): + try: + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except Exception:
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence == SEEK_SET:
+            start_index = 0
+        elif whence == SEEK_CUR:
+            start_index = self._position
+        elif whence == SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, b):
+        raise UnsupportedOperation
+
+    def writelines(self, lines):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            pass
+
+        # always re-slice the leftover so consumed bytes are never replayed on the next read
+        self.leftover = data[size:]
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py
new file mode 100644
index 0000000..f6a8725
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py
@@ -0,0 +1,351 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_chunk(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await
uploader.process_substream_block(block)) + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, block_id, block_stream): + range_id = await self._upload_substream_block(block_id, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, block_id, block_stream): + try: + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + 
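+        # Upload one range of the file and hand back its 'bytes=start-end' label
+        # together with the raw service response so ranges can be ordered later.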
length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+        return range_id, response
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py
new file mode 100644
index 0000000..20dad95
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py
@@ -0,0 +1,491 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, List, TYPE_CHECKING
+)
+
+from ._shared import sign_string
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services
+from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants
+from ._shared.parser import _str
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from . import (
+        ResourceTypes,
+        AccountSasPermissions,
+        ShareSasPermissions,
+        FileSasPermissions
+    )
+
+class FileSharedAccessSignature(SharedAccessSignature):
+    '''
+    Provides a factory for creating file and share access
+    signature tokens with a common account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        '''
+        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+
+    def generate_file(self, share_name, directory_name=None, file_name=None,
+                      permission=None, expiry=None, start=None, policy_id=None,
+                      ip=None, protocol=None, cache_control=None,
+                      content_disposition=None, content_encoding=None,
+                      content_language=None, content_type=None):
+        '''
+        Generates a shared access signature for the file.
+        Use the returned signature with the sas_token parameter of FileService.
+
+        :param str share_name:
+            Name of share.
+        :param str directory_name:
+            Name of directory. SAS tokens cannot be created for directories, so
+            this parameter should only be present if file_name is provided.
+        :param str file_name:
+            Name of file.
+        :param ~azure.storage.fileshare.FileSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered read, create, write, delete, list.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_file_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param str cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param str content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param str content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param str content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param str content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        '''
+        resource_path = share_name
+        if directory_name is not None:
+            resource_path += '/' + _str(directory_name)
+        if file_name is not None:
+            resource_path += '/' + _str(file_name)
+
+        sas = _FileSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+        sas.add_resource('f')
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_resource_signature(self.account_name, self.account_key, resource_path)
+
+        return sas.get_token()
+
+    def generate_share(self, share_name, permission=None, expiry=None,
+                       start=None, policy_id=None, ip=None, protocol=None,
+                       cache_control=None, content_disposition=None,
+                       content_encoding=None, content_language=None,
+                       content_type=None):
+        '''
+        Generates a shared access signature for the share.
+        Use the returned signature with the sas_token parameter of FileService.
+
+        :param str share_name:
+            Name of share.
+        :param ShareSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered read, create, write, delete, list.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_file_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('s') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, share_name) + + return sas.get_token() + + +class _FileSharedAccessHelper(_SharedAccessHelper): + + def add_resource_signature(self, account_name, account_key, path): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/file/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
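+        # The concatenation below mirrors the service-side string-to-sign layout: permission,
+        # start, expiry, canonicalized resource, identifier, IP, protocol, version, then the
+        # five response-header overrides (Cache-Control through Content-Type).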
+ string_to_sign = \ + (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource + + get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for the file service. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ResourceTypes resource_types: + Specifies the resource types that are accessible with the account SAS. + :param ~azure.storage.fileshare.AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. 
+ :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 8 + :caption: Generate a sas token. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(fileshare=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_share_sas( + account_name, # type: str + share_name, # type: str + account_key, # type: str + permission=None, # type: Optional[Union[ShareSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for a share. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ShareSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, create, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. 
+ :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + return sas.generate_share( + share_name=share_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_file_sas( + account_name, # type: str + share_name, # type: str + file_path, # type: List[str] + account_key, # type: str + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param file_path: + The file path represented as a list of path segments, including the file name. + :type file_path: List[str] + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.FileSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. 
+ If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + if len(file_path) > 1: + dir_path = '/'.join(file_path[:-1]) + else: + dir_path = None # type: ignore + return sas.generate_file( # type: ignore + share_name=share_name, + directory_name=dir_path, + file_name=file_path[-1], + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py new file mode 100644 index 0000000..dc78818 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.2.0" diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py new file mode 100644 index 0000000..73393b8 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py @@ -0,0 +1,20 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from ._file_client_async import ShareFileClient +from ._directory_client_async import ShareDirectoryClient +from ._share_client_async import ShareClient +from ._share_service_client_async import ShareServiceClient +from ._lease_async import ShareLeaseClient + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', +] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py new file mode 100644 index 0000000..29b6396 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py @@ -0,0 +1,593 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _get_file_permission, _datetime_to_str +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import StorageErrorException +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_directory_properties +from .._serialize import get_api_version +from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase +from ._file_client_async import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes + from .._generated.models import HandleItem + + +class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. 
This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareDirectoryClient, self).__init__( + account_url, + share_name=share_name, + directory_path=directory_path, + snapshot=snapshot, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = loop + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param str file_name: + The name of the file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 16 + :caption: Gets the subdirectory client. 
+ """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + @distributed_trace_async + async def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 16 + :caption: Creates a directory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 16 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + await self._client.directory.delete(timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], Any) -> AsyncItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 16 + :caption: List directories and files. 
+ """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> AsyncItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace_async + async def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace_async + async def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = await self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace_async + async def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. 
+ :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return await self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 16 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace_async + async def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 16 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace_async + async def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 16 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace_async + async def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 16 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py new file mode 100644 index 0000000..c0db16d --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py @@ -0,0 +1,467 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings + +from azure.core.exceptions import HttpResponseError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._download import process_range_and_offset, _ChunkDownloader + + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = data.response.body() + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options + ) + range_header, range_validation = validate_and_format_range_headers( + download_range[0], + download_range[1], + check_content_md5=self.validate_content + ) + try: + _, response = await self.client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self.validate_content, + data_stream_total=self.total_size, + download_stream_current=self.progress_total, + **self.request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) + return chunk_data + + +class _AsyncChunkIterator(object): + """Async iterator for chunks in file download stream.""" + + def __init__(self, size, content, downloader): + self.size = size 
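+        # State below: _current_content caches the body of the initial GET,
+        # _iter_downloader (if any) fetches the remaining chunks, and
+        # _complete short-circuits iteration for zero-byte downloads.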
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            try:
+                chunk = next(self._iter_chunks)
+            except StopIteration:
+                raise StopAsyncIteration("Download complete")
+            self._current_content = await self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar str path:
+        The full path of the file.
+    :ivar str share:
+        The name of the share where the file is.
+    :ivar ~azure.storage.fileshare.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(
+        self,
+        client=None,
+        config=None,
+        start_range=None,
+        end_range=None,
+        validate_content=None,
+        encryption_options=None,
+        max_concurrency=1,
+        name=None,
+        path=None,
+        share=None,
+        encoding=None,
+        **kwargs
+    ):
+        self.name = name
+        self.path = path
+        self.share = share
+        self.properties = None
+        self.size = None
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = None
+        self._file_size = None
+        self._response = None
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
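+        # Illustrative sizing (assuming the typical defaults of a 32 MiB
+        # max_single_get_size and a 4 MiB max_chunk_get_size): a plain download
+        # first requests bytes 0-33554431, while validate_content=True caps the
+        # first request at 4 MiB so the service can attach a transactional MD5.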
+ self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + try: + location_mode, response = await self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. + self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the length unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = await self._client.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. 
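+        # For example, a 1 MiB file fetched with a 32 MiB first-get window is
+        # fully returned by the initial request, so no chunked follow-up runs.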
+        if response.properties.size == self.size:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        """Iterate over chunks in the download stream.
+
+        :rtype: Iterable[bytes]
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # Start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options)
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    async def readall(self):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream):
+        """Download the contents of this file to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
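+            # Parallel chunks complete out of order and are written at their
+            # own offsets via seek(), so the target stream must be seekable:
+            # ask directly on Python 3, otherwise probe with a no-op seek.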
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _AsyncChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + _done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + await asyncio.wait(running_futures) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py new file mode 100644 index 0000000..3d48fdc --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py @@ -0,0 +1,1165 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method +import functools +import time +from io import BytesIO +from typing import Optional, Union, IO, List, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.async_paging import AsyncItemPaged + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _datetime_to_str, _get_file_permission +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import StorageErrorException, FileHTTPHeaders +from .._shared.policies_async import ExponentialRetry +from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.request_handlers import add_metadata_headers, get_length +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_file_properties, deserialize_file_stream +from .._serialize import get_access_conditions, get_smb_properties, get_api_version +from .._file_client import ShareFileClient as ShareFileClientBase +from ._models import HandlesPaged +from ._lease_async import ShareLeaseClient +from ._download_async import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes + from .._generated.models import HandleItem + + +async def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs +): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = await client.create_file( + size, content_settings=content_settings, metadata=metadata, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + timeout=timeout, + **kwargs + ) + if size == 0: + return response + + responses = await upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except StorageErrorException as error: + process_storage_error(error) + + +class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): + """A client to interact with a specific file, although that file may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. + :param str snapshot: + An optional file snapshot on which to operate. 
This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credential with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string or an account
+        shared access key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-12-12'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword loop:
+        The event loop to run the asynchronous tasks.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+
+    def __init__(  # type: ignore
+            self,
+            account_url,  # type: str
+            share_name,  # type: str
+            file_path,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        super(ShareFileClient, self).__init__(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot,
+            credential=credential, loop=loop, **kwargs
+        )
+        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
+        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
+        self._loop = loop
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_id=None, **kwargs):
+        # type: (Optional[str], Any) -> ShareLeaseClient
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START acquire_lease_on_blob]
+                :end-before: [END acquire_lease_on_blob]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on a blob.
+        """
+        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
+        await lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_file(  # type: ignore
+            self,
+            size,  # type: int
+            file_attributes="none",  # type: Union[str, NTFSAttributes]
+            file_creation_time="now",  # type: Union[str, datetime]
+            file_last_write_time="now",  # type: Union[str, datetime]
+            file_permission=None,  # type: Optional[str]
+            permission_key=None,  # type: Optional[str]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TiB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+ :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 16 + :caption: Create a file. 
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return await self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword str encoding: + Defaults to UTF-8. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 16 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, "read"): + stream = data + elif hasattr(data, "__iter__"): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return await _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs + ) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. 
This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START copy_file_from_url] + :end-before: [END copy_file_from_url] + :language: python + :dedent: 16 + :caption: Copy a file from a URL + """ + metadata = kwargs.pop('metadata', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + kwargs.update(get_smb_properties(kwargs)) + try: + return await self._client.file.start_copy( + source_url, + metadata=metadata, + lease_access_conditions=access_conditions, + headers=headers, + cls=return_response_headers, + timeout=timeout, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, FileProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination file with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of FileProperties. + :type copy_id: str or ~azure.storage.fileshare.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id["copy_id"] + except TypeError: + pass + try: + await self._client.file.abort_copy(copy_id=copy_id, + lease_access_conditions=access_conditions, + timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def download_file( + self, + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Iterable[bytes] + """Downloads a file to a stream with automatic chunking. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. 
versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable data generator (stream)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 16
+                :caption: Download a file.
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+
+        range_end = None
+        if length is not None:
+            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        downloader = StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            encryption_options=None,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs
+        )
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
+    @distributed_trace_async
+    async def delete_file(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs):
+        # type: (Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = await self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = "/".join(self.file_path) + return file_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop("size", None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition,
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
+        try:
+            return await self._client.file.set_http_headers(  # type: ignore
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=_str(file_attributes),
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            )
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_file_metadata(self, metadata=None, **kwargs):  # type: ignore
+        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return await self._client.file.set_metadata(  # type: ignore
+                metadata=metadata, lease_access_conditions=access_conditions,
+                timeout=timeout, cls=return_response_headers, headers=headers, **kwargs
+            )
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def upload_range(  # type: ignore
+            self,
+            data,  # type: bytes
+            offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent.
This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if isinstance(data, six.text_type):
+            data = data.encode(encoding)
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return await self._client.file.upload_range(  # type: ignore
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs
+            )
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def upload_range_from_url(self, source_url,
+                                    offset,
+                                    length,
+                                    source_offset,
+                                    **kwargs
+                                    ):
+        # type: (str, int, int, int, **Any) -> Dict[str, Any]
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (length-offset).
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
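+
+        A minimal call sketch (illustrative only; ``file_client`` is assumed to
+        be an existing ShareFileClient, and the source URL and SAS token are
+        placeholders):
+
+        .. code-block:: python
+
+            # copy 512 bytes from the start of the source file
+            # into this file at offset 0
+            await file_client.upload_range_from_url(
+                "https://src.file.core.windows.net/share/dir/srcfile?<sas-token>",
+                offset=0, length=512, source_offset=0)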
+ """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.file.upload_range_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_ranges( # type: ignore + self, + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid ranges of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A list of valid ranges. + :rtype: List[dict[str, int]] + """ + timeout = kwargs.pop('timeout', None) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Unsupported method for encryption.") + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + content_range = None + if offset is not None: + if length is not None: + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = "bytes={0}-{1}".format(offset, end_range) + else: + content_range = "bytes={0}-".format(offset) + try: + ranges = await self._client.file.get_range_list( + range=content_range, + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + return [{"start": b.start, "end": b.end} for b in ranges] + + @distributed_trace_async + async def clear_range( # type: ignore + self, + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Clears the specified range and releases the space used in storage for + that range. + + :param int offset: + Start of byte range to use for clearing a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for clearing a section of the file. + The range can be up to 4 MB in size. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Unsupported method for encryption.")
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns to a 512-byte boundary")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns to a 512-byte boundary")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = "bytes={0}-{1}".format(offset, end_range)
+        try:
+            return await self._client.file.upload_range(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs
+            )
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def resize_file(self, size, **kwargs):
+        # type: (int, Any) -> Dict[str, Any]
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize file to (in bytes).
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.file.set_http_headers(  # type: ignore
+                file_content_length=size,
+                file_attributes="preserve",
+                file_creation_time="preserve",
+                file_last_write_time="preserve",
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs
+            )
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_handles(self, **kwargs):
+        # type: (Any) -> AsyncItemPaged
+        """Lists handles for file.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An auto-paging iterable of HandleItem
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop("results_per_page", None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace_async
+    async def close_handle(self, handle, **kwargs):
+        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        try:
+            handle_id = handle.id  # type: ignore
+        except AttributeError:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported.
Use 'close_all_handles' instead.") + try: + response = await self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py new file mode 100644 index 0000000..0a04484 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py @@ -0,0 +1,166 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._generated.models import ( + StorageErrorException) +from .._lease import ShareLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + ShareFileClient = TypeVar("ShareFileClient") + + +class ShareLeaseClient(LeaseClientBase): + """Creates a new ShareLeaseClient. + + This client provides lease operations on a ShareFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. 
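+
+    A minimal usage sketch (illustrative only; assumes an existing
+    ``ShareFileClient`` named ``file_client`` whose ``acquire_lease`` method
+    returns an instance of this class)::
+
+        lease = await file_client.acquire_lease()
+        async with lease:
+            ...  # operate on the file while the lease is held; released on exit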
+
+    :param client:
+        The client of the file to lease.
+    :type client: ~azure.storage.fileshare.aio.ShareFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, **kwargs):
+        # type: (Any) -> None
+        """Requests a new lease. This operation establishes and manages a lock on a
+        file for write and delete operations. If the file does not have an active lease,
+        the File service creates a lease on the file and returns a new lease ID. If the
+        file has an active lease, you can only request a new lease using the active
+        lease ID.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+        try:
+            response = await self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=-1,
+                proposed_lease_id=self.id,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')  # type: str
+        self.last_modified = response.get('last_modified')  # type: datetime
+        self.etag = response.get('etag')  # type: str
+
+    @distributed_trace_async
+    async def release(self, **kwargs):
+        # type: (Any) -> None
+        """Releases the lease. The lease may be released if the lease ID specified on
+        the request matches that associated with the file. Releasing the lease allows
+        another client to immediately acquire the lease for the file as soon as the
+        release is complete.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        try:
+            response = await self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')  # type: str
+        self.id = response.get('lease_id')  # type: str
+        self.last_modified = response.get('last_modified')  # type: datetime
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id, **kwargs):
+        # type: (str, Any) -> None
+        """Changes the lease ID of an active lease. A change must include the current
+        lease ID in x-ms-lease-id and a new lease ID in x-ms-proposed-lease-id.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The File service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        try:
+            response = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')  # type: str
+        self.id = response.get('lease_id')  # type: str
+        self.last_modified = response.get('last_modified')  # type: datetime
+
+    @distributed_trace_async
+    async def break_lease(self, **kwargs):
+        # type: (Any) -> int
+        """Force breaks the lease if the file has an active lease. Any authorized
+        request can break the lease; the request is not required to specify a matching
+        lease ID. An infinite lease breaks immediately.
+
+        Once a lease is broken, it cannot be changed. When a lease is successfully
+        broken, the response indicates the interval in seconds until a new lease can
+        be acquired.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        try:
+            response = await self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        return response.get('lease_time')  # type: ignore
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py
new file mode 100644
index 0000000..affee8f
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py
@@ -0,0 +1,178 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from azure.core.async_paging import AsyncPageIterator
+
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from .._generated.models import StorageErrorException
+from .._generated.models import DirectoryItem
+from .._models import Handle, ShareProperties
+
+
+def _wrap_item(item):
+    if isinstance(item, DirectoryItem):
+        return {'name': item.name, 'is_directory': True}
+    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class SharePropertiesPaged(AsyncPageIterator):
+    """An iterable of Share properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.fileshare.ShareProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only shares whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of share names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(SharePropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class HandlesPaged(AsyncPageIterator):
+    """An iterable of Handles.
+
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param int results_per_page: The maximum number of handles to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryPropertiesPaged(AsyncPageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield dicts for the contents of the directory. The dicts
+    will have the keys 'name' (str) and 'is_directory' (bool).
+    Items that are files (is_directory=False) will have an additional 'size' key,
+    whose value is the file's content length in bytes.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(dict(str, Any))
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only directories whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of files and directories to
+        retrieve per call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(DirectoryPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                prefix=self.prefix,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items]
+        self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items])
+        return self._response.next_marker or None, self.current_page
diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py
new file mode 100644
index 0000000..b6fb243
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py
@@ -0,0 +1,563 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.pipeline import AsyncPipeline +from .._shared.policies_async import ExponentialRetry +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import ( + StorageErrorException, + SignedIdentifier, + DeleteSnapshotsOptionType) +from .._deserialize import deserialize_share_properties, deserialize_permission +from .._serialize import get_api_version +from .._share_client import ShareClient as ShareClientBase +from ._directory_client_async import ShareDirectoryClient +from ._file_client_async import ShareFileClient + +if TYPE_CHECKING: + from .._models import ShareProperties, AccessPolicy + + +class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        super(ShareClient, self).__init__(
+            account_url,
+            share_name=share_name,
+            snapshot=snapshot,
+            credential=credential,
+            loop=loop,
+            **kwargs)
+        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
+        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
+        self._loop = loop
+
+    def get_directory_client(self, directory_path=None):
+        # type: (Optional[str]) -> ShareDirectoryClient
+        """Get a client to interact with the specified directory.
+        The directory need not already exist.
+
+        :param str directory_path:
+            Path to the specified directory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop)
+
+    def get_file_client(self, file_path):
+        # type: (str) -> ShareFileClient
+        """Get a client to interact with the specified file.
+        The file need not already exist.
+
+        :param str file_path:
+            Path to the specified file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+
+        return ShareFileClient(
+            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop)
+
+    @distributed_trace_async
+    async def create_share(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Creates a new Share under the account. If a share with the
+        same name already exists, the operation fails.
+
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the share as metadata.
+        :keyword int quota:
+            The quota to be allotted, in gigabytes (GB).
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share]
+                :end-before: [END create_share]
+                :language: python
+                :dedent: 12
+                :caption: Creates a file share.
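+
+        A minimal inline sketch (illustrative only; assumes this client was
+        constructed for a share that does not exist yet)::
+
+            props = await share_client.create_share(metadata={'owner': 'test'}, quota=1)
+            print(props['etag'], props['last_modified'])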
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+
+        try:
+            return await self._client.share.create(  # type: ignore
+                timeout=timeout,
+                metadata=metadata,
+                quota=quota,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_snapshot(  # type: ignore
+            self,
+            **kwargs  # type: Optional[Any]
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Creates a snapshot of the share.
+
+        A snapshot is a read-only version of a share that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a share as it appears at a moment in time.
+
+        A snapshot of a share has the same name as the base share from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the share as metadata.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share_snapshot]
+                :end-before: [END create_share_snapshot]
+                :language: python
+                :dedent: 16
+                :caption: Creates a snapshot of the file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return await self._client.share.create_snapshot(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def delete_share(
+            self,
+            delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START delete_share]
+                :end-before: [END delete_share]
+                :language: python
+                :dedent: 16
+                :caption: Deletes the share and any snapshots.
+        """
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.include
+        try:
+            await self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                delete_snapshots=delete_include,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_properties(self, **kwargs):
+        # type: (Any) -> ShareProperties
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world_async.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 16
+                :caption: Gets the share properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            props = await self._client.share.get_properties(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                cls=deserialize_share_properties,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        props.name = self.share_name
+        props.snapshot = self.snapshot
+        return props  # type: ignore
+
+    @distributed_trace_async
+    async def set_share_quota(self, quota, **kwargs):
+        # type: (int, Any) -> Dict[str, Any]
+        """Sets the quota for the share.
+
+        :param int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0 and less than or equal to 5 TB.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_quota]
+                :end-before: [END set_share_quota]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share quota.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.share.set_quota(  # type: ignore
+                timeout=timeout,
+                quota=quota,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_share_metadata(self, metadata, **kwargs):
+        # type: (Dict[str, Any], Any) -> Dict[str, Any]
+        """Sets the metadata for the share.
+
+        Each call to this operation replaces all existing metadata
+        attached to the share. To remove all metadata from the share,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the share as metadata.
+        :type metadata: dict(str, str)
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_metadata]
+                :end-before: [END set_share_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share metadata.
+        """
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return await self._client.share.set_metadata(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the share. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = await self._client.share.get_access_policy(
+                timeout=timeout,
+                cls=return_headers_and_deserialized,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('share_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace_async
+    async def set_share_access_policy(self, signed_identifiers, **kwargs):
+        # type: (Dict[str, AccessPolicy], Any) -> Dict[str, Any]
+        """Sets the permissions for the share, or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the share. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`)
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        timeout = kwargs.pop('timeout', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value))
+        signed_identifiers = identifiers  # type: ignore
+
+        try:
+            return await self._client.share.set_access_policy(  # type: ignore
+                share_acl=signed_identifiers or None,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_stats(self, **kwargs):
+        # type: (Any) -> int
+        """Gets the approximate size of the data stored on the share in bytes.
+
+        Note that this value may not include all recently created
+        or recently resized files.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The approximate size of the data (in bytes) stored on the share.
+        :rtype: int
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = await self._client.share.get_statistics(
+                timeout=timeout,
+                **kwargs)
+            return stats.share_usage_bytes  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_directories_and_files(  # type: ignore
+            self,
+            directory_name=None,  # type: Optional[str]
+            name_starts_with=None,  # type: Optional[str]
+            marker=None,  # type: Optional[str]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Iterable[Dict[str, str]]
+        """Lists the directories and files under the share.
+
+        :param str directory_name:
+            Name of a directory.
+        :param str name_starts_with:
+            Filters the results to return only directories whose names
+            begin with the specified prefix.
+        :param str marker:
+            An opaque continuation token. This value can be retrieved from the
+            next_marker field of a previous generator object. If specified,
+            this generator will begin returning results from this point.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START share_list_files_in_dir]
+                :end-before: [END share_list_files_in_dir]
+                :language: python
+                :dedent: 16
+                :caption: List directories and files in the share.
+        """
+        timeout = kwargs.pop('timeout', None)
+        directory = self.get_directory_client(directory_name)
+        return directory.list_directories_and_files(
+            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def create_permission_for_share(
+            self,
+            file_permission,  # type: str
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> str
+        """Create a permission (a security descriptor) at the share level.
+
+        This 'permission' can be used for the files/directories in the share.
+        If a matching 'permission' already exists, the key of that permission is
+        returned; otherwise, a new permission is created at the share level and
+        its key is returned.
+
+        :param str file_permission:
+            File permission, a portable SDDL
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file permission key
+        :rtype: str
+        """
+        timeout = kwargs.pop('timeout', None)
+        options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+        try:
+            return await self._client.share.create_permission(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_permission_for_share(  # type: ignore
+            self,
+            permission_key,  # type: str
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> str
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            Key of the file permission to retrieve
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file permission (a portable SDDL)
+        :rtype: str
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.share.get_permission(  # type: ignore
+                file_permission_key=permission_key,
+                cls=deserialize_permission,
+                timeout=timeout,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_directory(self, directory_name, **kwargs):
+        # type: (str, Any) -> ShareDirectoryClient
+        """Creates a directory in the share and returns a client to interact
+        with the directory.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the directory as metadata.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        await directory.create_directory(**kwargs)
+        return directory  # type: ignore
+
+    @distributed_trace_async
+    async def delete_directory(self, directory_name, **kwargs):
+        # type: (str, Any) -> None
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :rtype: None + """ + directory = self.get_directory_client(directory_name) + await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py new file mode 100644 index 0000000..2ee8390 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py @@ -0,0 +1,362 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.response_handlers import process_storage_error +from .._shared.policies_async import ExponentialRetry +from .._generated.aio import AzureFileStorage +from .._generated.models import StorageErrorException, StorageServiceProperties +from .._generated.version import VERSION +from .._share_service_client import ShareServiceClient as ShareServiceClientBase +from .._serialize import get_api_version +from ._share_client_async import ShareClient +from ._models import SharePropertiesPaged +from .._models import service_properties_deserialize + +if TYPE_CHECKING: + from datetime import datetime + from .._shared.models import ResourceTypes, AccountSasPermissions + from .._models import ( + ShareProperties, + Metrics, + CorsRule, + ) + + +class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. + For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_authentication_async.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. + """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareServiceClient, self).__init__( + account_url, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = loop + + @distributed_trace_async + async def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 12 + :caption: Get file share service properties. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. 
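+
+        A minimal inline sketch (illustrative only; assumes ``Metrics`` and
+        ``RetentionPolicy`` are importable from this package's top level, as in
+        ``azure.storage.fileshare``)::
+
+            minute_metrics = Metrics(
+                enabled=True, include_apis=True,
+                retention_policy=RetentionPolicy(enabled=True, days=5))
+            await share_service_client.set_service_properties(minute_metrics=minute_metrics)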
+        """
+        timeout = kwargs.pop('timeout', None)
+        props = StorageServiceProperties(
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=cors
+        )
+        try:
+            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_shares(
+            self,
+            name_starts_with=None,  # type: Optional[str]
+            include_metadata=False,  # type: Optional[bool]
+            include_snapshots=False,  # type: Optional[bool]
+            **kwargs  # type: Any
+        ):  # type: (...) -> AsyncItemPaged
+        """Returns an auto-paging iterable of dict-like ShareProperties under the specified account.
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all shares have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only shares whose names
+            begin with the specified name_starts_with.
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This is only applicable to accounts with share soft delete enabled.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 16
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace_async
+    async def create_share(
+            self,
+            share_name,  # type: str
+            **kwargs
+        ):
+        # type: (...) -> ShareClient
+        """Creates a new share under the specified account. If the share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict(str,str) metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            Quota in gigabytes (GB).
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 12
+                :caption: Create a share in the file share service.
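+
+        An equivalent inline sketch (illustrative only; assumes an existing
+        ``ShareServiceClient`` named ``service``)::
+
+            share_client = await service.create_share("myshare", metadata={'Category': 'test'})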
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
+        return share
+
+    @distributed_trace_async
+    async def delete_share(
+            self,
+            share_name,  # type: Union[ShareProperties, str]
+            delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 16
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs):
+        # type: (str, str, **Any) -> ShareClient
+        """Restores a soft-deleted share.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+        """
+        share = self.get_share_client(deleted_share_name)
+        try:
+            await share._client.share.restore(  # pylint: disable = protected-access
+                deleted_share_name=deleted_share_name,
+                deleted_share_version=deleted_share_version,
+                timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def get_share_client(self, share, snapshot=None):
+        # type: (Union[ShareProperties, str], Optional[Union[Dict[str, Any], str]]) -> ShareClient
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
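+
+        An inline sketch (illustrative only; note that no request is sent to the
+        service until the returned client is used)::
+
+            share_client = service.get_share_client("myshare")
+            props = await share_client.get_share_properties()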
+ """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py index 8b65417..b81ced7 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py @@ -268,7 +268,8 @@ def _batch_send( policies=[ StorageHeadersPolicy(), self._credential_policy - ] + ], + enforce_https=False ) pipeline_response = self._pipeline.run( diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py index 3c806d7..1772251 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py @@ -124,7 +124,8 @@ async def _batch_send( policies=[ StorageHeadersPolicy(), self._credential_policy - ] + ], + enforce_https=False ) pipeline_response = await self._pipeline.run( diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py index b4a2f9e..2ba9ea4 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py @@ -322,7 +322,7 @@ def __init__(self, **kwargs): # pylint: disable=unused-argument @staticmethod def get_content_md5(data): - md5 = hashlib.md5() + md5 = hashlib.md5() #nosec if isinstance(data, bytes): md5.update(data) elif hasattr(data, 'read'): diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py index 2ce74d4..4f15b65 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py @@ -143,5 +143,5 @@ def add_metadata_headers(metadata=None): headers = {} if metadata: for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key)] = value + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value return headers diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py index 13b814e..623fa16 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py @@ -32,11 +32,11 @@ def _parallel_uploads(executor, uploader, pending, running): done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) range_ids.extend([chunk.result() for chunk in done]) try: - next_chunk = next(pending) + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) except StopIteration: break - else: - running.add(executor.submit(with_current_context(uploader), next_chunk)) # Wait for the 
remaining uploads to finish done, _running = futures.wait(running) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py index 92fcab5..fe68a2b 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py @@ -32,11 +32,11 @@ async def _parallel_uploads(uploader, pending, running): done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) range_ids.extend([chunk.result() for chunk in done]) try: - next_chunk = next(pending) + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) except StopIteration: break - else: - running.add(asyncio.ensure_future(uploader(next_chunk))) # Wait for the remaining uploads to finish if running: diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py index 8c6cb2e..b8f9775 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py @@ -9,4 +9,4 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "12.1.1" +VERSION = "12.1.2" diff --git a/scripts/updatev2_1.sh b/scripts/updatev2_1.sh index 133f526..105ac74 100755 --- a/scripts/updatev2_1.sh +++ b/scripts/updatev2_1.sh @@ -8,7 +8,7 @@ cd $workdir if [ ! -d venv ]; then python -m virtualenv venv . venv/bin/activate - pip install azure-storage-file-datalake azure-storage-blob azure-storage-file-share azure-storage-queue + pip install azure-storage-file-datalake azure-storage-blob azure-storage-file-share azure-storage-queue -U fi diff --git a/setup.py b/setup.py index 41ea599..1b50497 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup( name='azure-multiapi-storage', - version='0.4.0', + version='0.4.1', description='Microsoft Azure Storage Client Library for Python with multi API version support.', long_description=open('README.rst', 'r').read(), license='MIT',
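
The _parallel_uploads hunks above (uploads.py and uploads_async.py) change the refill
logic: instead of submitting exactly one new chunk each time the wait returns, they now
submit one replacement chunk per completed future, so the pool of in-flight uploads
stays full even when several chunks finish at once. A minimal standalone sketch of the
same pattern (illustrative only, independent of the SDK; uploader and chunks are
placeholder names):

    import itertools
    from concurrent import futures

    def parallel_uploads(executor, uploader, chunks, max_in_flight=4):
        """Run uploader over chunks, keeping at most max_in_flight futures live."""
        pending = iter(chunks)
        running = {executor.submit(uploader, chunk)
                   for chunk in itertools.islice(pending, max_in_flight)}
        results = []
        while running:
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            results.extend(f.result() for f in done)
            try:
                # Refill with one new submission per completed future.
                for _ in range(len(done)):
                    running.add(executor.submit(uploader, next(pending)))
            except StopIteration:
                pass  # chunk iterator exhausted; keep draining what is still running
        return results

    # Example: parallel_uploads(futures.ThreadPoolExecutor(4), len, [b'a', b'bb', b'ccc'])
    # returns the chunk lengths in completion order.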